summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorTyler Retzlaff <roretzla@linux.microsoft.com>2024-02-14 22:50:54 -0800
committerThomas Monjalon <thomas@monjalon.net>2024-02-18 13:12:33 +0100
commit283d843722f11c4cb4714fa8661f4cfb7986b0e6 (patch)
treeab748b1176e063ce47207dac28e6a4e093804be4 /lib
parent93998f3c5f22747e4f2c5e8714fa5cbe6c9d1574 (diff)
lib: use atomic thread fence recommended API
Use rte_atomic_thread_fence() instead of directly using the __atomic_thread_fence() GCC builtin intrinsic or the internal __rte_atomic_thread_fence() function.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Diffstat (limited to 'lib')
-rw-r--r--lib/distributor/rte_distributor.c2
-rw-r--r--lib/eal/common/eal_common_trace.c2
-rw-r--r--lib/eal/include/rte_mcslock.h4
-rw-r--r--lib/hash/rte_cuckoo_hash.c10
-rw-r--r--lib/lpm/rte_lpm.c4
-rw-r--r--lib/ring/rte_ring_c11_pvt.h4
-rw-r--r--lib/stack/rte_stack_lf_c11.h2
7 files changed, 14 insertions, 14 deletions
diff --git a/lib/distributor/rte_distributor.c b/lib/distributor/rte_distributor.c
index 2ecb95c3e5..e842dc9959 100644
--- a/lib/distributor/rte_distributor.c
+++ b/lib/distributor/rte_distributor.c
@@ -187,7 +187,7 @@ rte_distributor_return_pkt(struct rte_distributor *d,
}
/* Sync with distributor to acquire retptrs */
- __atomic_thread_fence(rte_memory_order_acquire);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
/* Switch off the return bit first */
buf->retptr64[i] = 0;
diff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c
index 6ad87fc0df..918f49bf4f 100644
--- a/lib/eal/common/eal_common_trace.c
+++ b/lib/eal/common/eal_common_trace.c
@@ -526,7 +526,7 @@ __rte_trace_point_register(rte_trace_point_t *handle, const char *name,
/* Add the trace point at tail */
STAILQ_INSERT_TAIL(&tp_list, tp, next);
- __atomic_thread_fence(rte_memory_order_release);
+ rte_atomic_thread_fence(rte_memory_order_release);
/* All Good !!! */
return 0;
diff --git a/lib/eal/include/rte_mcslock.h b/lib/eal/include/rte_mcslock.h
index 2ca967f9c1..0aeb1a09f4 100644
--- a/lib/eal/include/rte_mcslock.h
+++ b/lib/eal/include/rte_mcslock.h
@@ -83,7 +83,7 @@ rte_mcslock_lock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me)
* store to prev->next. Otherwise it will cause a deadlock. Need a
* store-load barrier.
*/
- __rte_atomic_thread_fence(rte_memory_order_acq_rel);
+ rte_atomic_thread_fence(rte_memory_order_acq_rel);
/* If the lock has already been acquired, it first atomically
* places the node at the end of the queue and then proceeds
* to spin on me->locked until the previous lock holder resets
@@ -117,7 +117,7 @@ rte_mcslock_unlock(RTE_ATOMIC(rte_mcslock_t *) *msl, RTE_ATOMIC(rte_mcslock_t *)
* while-loop first. This has the potential to cause a
* deadlock. Need a load barrier.
*/
- __rte_atomic_thread_fence(rte_memory_order_acquire);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* More nodes added to the queue by other CPUs.
* Wait until the next pointer is set.
*/
diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index 70456754c4..9cf94645f6 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -878,7 +878,7 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
/* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_release);
+ rte_atomic_thread_fence(rte_memory_order_release);
}
/* Need to swap current/alt sig to allow later
@@ -910,7 +910,7 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
/* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_release);
+ rte_atomic_thread_fence(rte_memory_order_release);
}
curr_bkt->sig_current[curr_slot] = sig;
@@ -1403,7 +1403,7 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
/* The loads of sig_current in search_one_bucket
* should not move below the load from tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_acquire);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Re-read the table change counter to check if the
* table has changed during search. If yes, re-do
* the search.
@@ -1632,7 +1632,7 @@ __rte_hash_compact_ll(const struct rte_hash *h,
/* The store to sig_current should
* not move above the store to tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_release);
+ rte_atomic_thread_fence(rte_memory_order_release);
}
last_bkt->sig_current[i] = NULL_SIGNATURE;
rte_atomic_store_explicit(&last_bkt->key_idx[i],
@@ -2223,7 +2223,7 @@ next_key:
/* The loads of sig_current in compare_signatures
* should not move below the load from tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_acquire);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Re-read the table change counter to check if the
* table has changed during search. If yes, re-do
* the search.
diff --git a/lib/lpm/rte_lpm.c b/lib/lpm/rte_lpm.c
index 363058e118..9633d637c7 100644
--- a/lib/lpm/rte_lpm.c
+++ b/lib/lpm/rte_lpm.c
@@ -1116,7 +1116,7 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
* Prevent the free of the tbl8 group from hoisting.
*/
i_lpm->lpm.tbl24[tbl24_index].valid = 0;
- __atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
status = tbl8_free(i_lpm, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
@@ -1132,7 +1132,7 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
*/
__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELAXED);
- __atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
status = tbl8_free(i_lpm, tbl8_group_start);
}
#undef group_idx
diff --git a/lib/ring/rte_ring_c11_pvt.h b/lib/ring/rte_ring_c11_pvt.h
index 5c10ad88f5..629b2d9288 100644
--- a/lib/ring/rte_ring_c11_pvt.h
+++ b/lib/ring/rte_ring_c11_pvt.h
@@ -68,7 +68,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
n = max;
/* Ensure the head is read before tail */
- __atomic_thread_fence(rte_memory_order_acquire);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* load-acquire synchronize with store-release of ht->tail
* in update_tail.
@@ -145,7 +145,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
n = max;
/* Ensure the head is read before tail */
- __atomic_thread_fence(rte_memory_order_acquire);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* this load-acquire synchronize with store-release of ht->tail
* in update_tail.
diff --git a/lib/stack/rte_stack_lf_c11.h b/lib/stack/rte_stack_lf_c11.h
index 9cb69983d5..60d46e963b 100644
--- a/lib/stack/rte_stack_lf_c11.h
+++ b/lib/stack/rte_stack_lf_c11.h
@@ -110,7 +110,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
* elements are properly ordered with respect to the head
* pointer read.
*/
- __atomic_thread_fence(rte_memory_order_acquire);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rte_prefetch0(old_head.top);