author     Anatoly Burakov <anatoly.burakov@intel.com>    2021-01-14 14:46:04 +0000
committer  Thomas Monjalon <thomas@monjalon.net>          2021-01-18 23:58:24 +0100
commit     68fbbb8369dbe0c38b4464886855d937ce21a443 (patch)
tree       bb298f249981e125c2045a6a1b8f7eed12b8fb09
parent     56833cbd3538b8edac3b3705f835cc56de41cfb0 (diff)
eal: avoid invalid power intrinsics API usage
Currently, the API documentation mandates that a user who wants to use the power management intrinsics must first call the `rte_cpu_get_intrinsics_support` API and check support for the specific intrinsics. However, if the user does not do that, it is possible to hit an illegal instruction error, because we are emitting raw instruction opcodes that may or may not be supported at runtime.

Now that everything lives in a C file, we can check for support at startup and prevent the user from ever encountering an illegal instruction error. The APIs also gain return values, so that unsupported or invalid calls report an error instead of failing silently.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
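For illustration only (not part of the patch), a minimal caller-side sketch of what the new contract allows: rte_power_monitor() now returns -ENOTSUP when UMONITOR/UMWAIT is unavailable rather than faulting, so an application can fall back to busy polling. The flag, timeout and fallback below are hypothetical placeholders.

#include <errno.h>
#include <stdint.h>

#include <rte_cycles.h>
#include <rte_pause.h>
#include <rte_power_intrinsics.h>

/* Sleep until 'flag' is set to 1, its cache line is written, or roughly
 * 1 ms worth of TSC cycles elapse; busy-poll when UMWAIT is unavailable.
 */
static int
wait_for_flag(volatile uint64_t *flag)
{
	const uint64_t tsc_timeout = rte_rdtsc() + rte_get_tsc_hz() / 1000;
	int ret;

	ret = rte_power_monitor(flag, 1, UINT64_MAX, tsc_timeout,
			sizeof(*flag));
	if (ret == -ENOTSUP) {
		/* no UMONITOR/UMWAIT support - fall back to busy polling */
		while (*flag != 1 && rte_rdtsc() < tsc_timeout)
			rte_pause();
		return 0;
	}
	return ret; /* 0 on success, -EINVAL for an invalid data_sz */
}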
-rw-r--r--  lib/librte_eal/arm/rte_power_intrinsics.c               12
-rw-r--r--  lib/librte_eal/include/generic/rte_power_intrinsics.h   24
-rw-r--r--  lib/librte_eal/ppc/rte_power_intrinsics.c               12
-rw-r--r--  lib/librte_eal/x86/rte_power_intrinsics.c               64
4 files changed, 94 insertions, 18 deletions
diff --git a/lib/librte_eal/arm/rte_power_intrinsics.c b/lib/librte_eal/arm/rte_power_intrinsics.c
index ab1f44f611..7e7552fa8a 100644
--- a/lib/librte_eal/arm/rte_power_intrinsics.c
+++ b/lib/librte_eal/arm/rte_power_intrinsics.c
@@ -7,7 +7,7 @@
/**
* This function is not supported on ARM.
*/
-void
+int
rte_power_monitor(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz)
@@ -17,12 +17,14 @@ rte_power_monitor(const volatile void *p, const uint64_t expected_value,
RTE_SET_USED(value_mask);
RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on ARM.
*/
-void
+int
rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz, rte_spinlock_t *lck)
@@ -33,13 +35,17 @@ rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(lck);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on ARM.
*/
-void
+int
rte_power_pause(const uint64_t tsc_timestamp)
{
RTE_SET_USED(tsc_timestamp);
+
+ return -ENOTSUP;
}
diff --git a/lib/librte_eal/include/generic/rte_power_intrinsics.h b/lib/librte_eal/include/generic/rte_power_intrinsics.h
index 67977bd511..37e4ec0414 100644
--- a/lib/librte_eal/include/generic/rte_power_intrinsics.h
+++ b/lib/librte_eal/include/generic/rte_power_intrinsics.h
@@ -34,7 +34,6 @@
*
* @warning It is responsibility of the user to check if this function is
* supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
- * Failing to do so may result in an illegal CPU instruction error.
*
* @param p
* Address to monitor for changes.
@@ -50,9 +49,14 @@
* Data size (in bytes) that will be used to compare expected value with the
* memory address. Can be 1, 2, 4 or 8. Supplying any other value will lead
* to undefined result.
+ *
+ * @return
+ * 0 on success
+ * -EINVAL on invalid parameters
+ * -ENOTSUP if unsupported
*/
__rte_experimental
-void rte_power_monitor(const volatile void *p,
+int rte_power_monitor(const volatile void *p,
const uint64_t expected_value, const uint64_t value_mask,
const uint64_t tsc_timestamp, const uint8_t data_sz);
@@ -75,7 +79,6 @@ void rte_power_monitor(const volatile void *p,
*
* @warning It is responsibility of the user to check if this function is
* supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
- * Failing to do so may result in an illegal CPU instruction error.
*
* @param p
* Address to monitor for changes.
@@ -95,9 +98,14 @@ void rte_power_monitor(const volatile void *p,
* A spinlock that must be locked before entering the function, will be
* unlocked while the CPU is sleeping, and will be locked again once the CPU
* wakes up.
+ *
+ * @return
+ * 0 on success
+ * -EINVAL on invalid parameters
+ * -ENOTSUP if unsupported
*/
__rte_experimental
-void rte_power_monitor_sync(const volatile void *p,
+int rte_power_monitor_sync(const volatile void *p,
const uint64_t expected_value, const uint64_t value_mask,
const uint64_t tsc_timestamp, const uint8_t data_sz,
rte_spinlock_t *lck);
@@ -111,13 +119,17 @@ void rte_power_monitor_sync(const volatile void *p,
*
* @warning It is responsibility of the user to check if this function is
* supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
- * Failing to do so may result in an illegal CPU instruction error.
*
* @param tsc_timestamp
* Maximum TSC timestamp to wait for. Note that the wait behavior is
* architecture-dependent.
+ *
+ * @return
+ * 0 on success
+ * -EINVAL on invalid parameters
+ * -ENOTSUP if unsupported
*/
__rte_experimental
-void rte_power_pause(const uint64_t tsc_timestamp);
+int rte_power_pause(const uint64_t tsc_timestamp);
#endif /* _RTE_POWER_INTRINSIC_H_ */
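As a usage note on the updated header documentation (not part of the patch): a hedged sketch of how a caller might drive rte_power_monitor_sync() under the documented lock discipline, where the lock is held on entry, dropped while the core sleeps, and re-taken before the call returns. The wakeup structure and field names are placeholders.

#include <errno.h>
#include <stdint.h>

#include <rte_power_intrinsics.h>
#include <rte_spinlock.h>

/* hypothetical shared state: another lcore takes the lock, sets 'wakeup'
 * to 1, and releases the lock to wake the sleeping core
 */
struct wakeup_ctl {
	rte_spinlock_t lock;
	volatile uint64_t wakeup;
};

static int
sleep_until_wakeup(struct wakeup_ctl *ctl, uint64_t tsc_timeout)
{
	int ret;

	/* the lock must be held before entering rte_power_monitor_sync() */
	rte_spinlock_lock(&ctl->lock);
	ret = rte_power_monitor_sync(&ctl->wakeup, 1, UINT64_MAX,
			tsc_timeout, sizeof(ctl->wakeup), &ctl->lock);
	rte_spinlock_unlock(&ctl->lock);

	return ret; /* 0 on success, -EINVAL or -ENOTSUP otherwise */
}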
diff --git a/lib/librte_eal/ppc/rte_power_intrinsics.c b/lib/librte_eal/ppc/rte_power_intrinsics.c
index 84340ca2a4..929e0611b0 100644
--- a/lib/librte_eal/ppc/rte_power_intrinsics.c
+++ b/lib/librte_eal/ppc/rte_power_intrinsics.c
@@ -7,7 +7,7 @@
/**
* This function is not supported on PPC64.
*/
-void
+int
rte_power_monitor(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz)
@@ -17,12 +17,14 @@ rte_power_monitor(const volatile void *p, const uint64_t expected_value,
RTE_SET_USED(value_mask);
RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on PPC64.
*/
-void
+int
rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz, rte_spinlock_t *lck)
@@ -33,13 +35,17 @@ rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
RTE_SET_USED(tsc_timestamp);
RTE_SET_USED(lck);
RTE_SET_USED(data_sz);
+
+ return -ENOTSUP;
}
/**
* This function is not supported on PPC64.
*/
-void
+int
rte_power_pause(const uint64_t tsc_timestamp)
{
RTE_SET_USED(tsc_timestamp);
+
+ return -ENOTSUP;
}
diff --git a/lib/librte_eal/x86/rte_power_intrinsics.c b/lib/librte_eal/x86/rte_power_intrinsics.c
index 34c5fd9c3e..2a38440bec 100644
--- a/lib/librte_eal/x86/rte_power_intrinsics.c
+++ b/lib/librte_eal/x86/rte_power_intrinsics.c
@@ -4,6 +4,8 @@
#include "rte_power_intrinsics.h"
+static bool wait_supported;
+
static inline uint64_t
__get_umwait_val(const volatile void *p, const uint8_t sz)
{
@@ -17,24 +19,47 @@ __get_umwait_val(const volatile void *p, const uint8_t sz)
case sizeof(uint64_t):
return *(const volatile uint64_t *)p;
default:
- /* this is an intrinsic, so we can't have any error handling */
+ /* shouldn't happen */
RTE_ASSERT(0);
return 0;
}
}
+static inline int
+__check_val_size(const uint8_t sz)
+{
+ switch (sz) {
+ case sizeof(uint8_t): /* fall-through */
+ case sizeof(uint16_t): /* fall-through */
+ case sizeof(uint32_t): /* fall-through */
+ case sizeof(uint64_t): /* fall-through */
+ return 0;
+ default:
+ /* unexpected size */
+ return -1;
+ }
+}
+
/**
* This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state.
* For more information about usage of these instructions, please refer to
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual.
*/
-void
+int
rte_power_monitor(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz)
{
const uint32_t tsc_l = (uint32_t)tsc_timestamp;
const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);
+
+ /* prevent user from running this instruction if it's not supported */
+ if (!wait_supported)
+ return -ENOTSUP;
+
+ if (__check_val_size(data_sz) < 0)
+ return -EINVAL;
+
/*
* we're using raw byte codes for now as only the newest compiler
* versions support this instruction natively.
@@ -51,13 +76,15 @@ rte_power_monitor(const volatile void *p, const uint64_t expected_value,
/* if the masked value is already matching, abort */
if (masked == expected_value)
- return;
+ return 0;
}
/* execute UMWAIT */
asm volatile(".byte 0xf2, 0x0f, 0xae, 0xf7;"
: /* ignore rflags */
: "D"(0), /* enter C0.2 */
"a"(tsc_l), "d"(tsc_h));
+
+ return 0;
}
/**
@@ -65,13 +92,21 @@ rte_power_monitor(const volatile void *p, const uint64_t expected_value,
* For more information about usage of these instructions, please refer to
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual.
*/
-void
+int
rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
const uint64_t value_mask, const uint64_t tsc_timestamp,
const uint8_t data_sz, rte_spinlock_t *lck)
{
const uint32_t tsc_l = (uint32_t)tsc_timestamp;
const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);
+
+ /* prevent user from running this instruction if it's not supported */
+ if (!wait_supported)
+ return -ENOTSUP;
+
+ if (__check_val_size(data_sz) < 0)
+ return -EINVAL;
+
/*
* we're using raw byte codes for now as only the newest compiler
* versions support this instruction natively.
@@ -88,7 +123,7 @@ rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
/* if the masked value is already matching, abort */
if (masked == expected_value)
- return;
+ return 0;
}
rte_spinlock_unlock(lck);
@@ -99,6 +134,8 @@ rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
"a"(tsc_l), "d"(tsc_h));
rte_spinlock_lock(lck);
+
+ return 0;
}
/**
@@ -106,15 +143,30 @@ rte_power_monitor_sync(const volatile void *p, const uint64_t expected_value,
* information about usage of this instruction, please refer to Intel(R) 64 and
* IA-32 Architectures Software Developer's Manual.
*/
-void
+int
rte_power_pause(const uint64_t tsc_timestamp)
{
const uint32_t tsc_l = (uint32_t)tsc_timestamp;
const uint32_t tsc_h = (uint32_t)(tsc_timestamp >> 32);
+ /* prevent user from running this instruction if it's not supported */
+ if (!wait_supported)
+ return -ENOTSUP;
+
/* execute TPAUSE */
asm volatile(".byte 0x66, 0x0f, 0xae, 0xf7;"
: /* ignore rflags */
: "D"(0), /* enter C0.2 */
"a"(tsc_l), "d"(tsc_h));
+
+ return 0;
+}
+
+RTE_INIT(rte_power_intrinsics_init) {
+ struct rte_cpu_intrinsics i;
+
+ rte_cpu_get_intrinsics_support(&i);
+
+ if (i.power_monitor && i.power_pause)
+ wait_supported = 1;
}
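Finally, a short sketch (not from the patch) of the explicit capability query that remains available to applications. It uses the same rte_cpu_get_intrinsics_support() interface the constructor above relies on, assuming its declaration lives in rte_cpuflags.h.

#include <stdio.h>

#include <rte_cpuflags.h>

/* report whether the power intrinsics will actually do anything on this CPU */
static void
report_power_intrinsics(void)
{
	struct rte_cpu_intrinsics intr;

	rte_cpu_get_intrinsics_support(&intr);

	printf("UMONITOR/UMWAIT: %s, TPAUSE: %s\n",
		intr.power_monitor ? "yes" : "no",
		intr.power_pause ? "yes" : "no");
}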