author     Stefan Hajnoczi <stefanha@redhat.com>  2020-09-23 11:56:46 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>  2020-09-23 16:07:44 +0100
commit     d73415a315471ac0b127ed3fad45c8ec5d711de1 (patch)
tree       bae20b3a39968fdfb4340b1a39b533333a8e6fd0 /tests
parent     ed7db34b5aedba4487fd949b2e545eef954f093e (diff)
qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses direct types (int, etc.) and this causes a
compiler error when QEMU code calls these functions from a source file
that also includes <stdatomic.h> via a system header file:

  $ CC=clang CXX=clang++ ./configure ... && make
  ../util/async.c:79:17: error: address argument to atomic operation must be
  a pointer to _Atomic type ('unsigned int *' invalid)

Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h and
<stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.

This patch was generated using:

  $ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
    sort -u >/tmp/changed_identifiers
  $ for identifier in $(</tmp/changed_identifiers); do
        sed -i "s%\<$identifier\>%q$identifier%g" \
            $(git grep -I -l "\<$identifier\>")
    done

I manually fixed line-wrap issues and misaligned rST tables.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
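[Editor's note] A minimal standalone sketch of the kind of clash the rename
avoids; the wrapper below is hypothetical and not taken from the QEMU tree,
but it mirrors how QEMU's helpers are built on the __atomic builtins, which
accept plain (non-_Atomic) types:

  /*
   * <stdatomic.h> reserves atomic_fetch_add() and friends for
   * _Atomic-qualified objects; a 'q'-prefixed wrapper on the __atomic
   * builtins can operate on plain types without colliding with it.
   */
  #include <stdatomic.h>
  #include <stdio.h>

  /* hypothetical QEMU-style wrapper on a plain integer */
  #define qatomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)

  int main(void)
  {
      unsigned int counter = 0;   /* plain type, not _Atomic */

      /*
       * atomic_fetch_add(&counter, 1) would fail here with clang:
       * "address argument to atomic operation must be a pointer to
       * _Atomic type" -- the prefixed name sidesteps the namespace clash.
       */
      qatomic_fetch_inc(&counter);

      printf("counter = %u\n", counter);
      return 0;
  }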
Diffstat (limited to 'tests')
-rw-r--r--  tests/atomic64-bench.c        14
-rw-r--r--  tests/atomic_add-bench.c      14
-rw-r--r--  tests/iothread.c               2
-rw-r--r--  tests/qht-bench.c             12
-rw-r--r--  tests/rcutorture.c            24
-rw-r--r--  tests/test-aio-multithread.c  52
-rw-r--r--  tests/test-logging.c           4
-rw-r--r--  tests/test-rcu-list.c         38
-rw-r--r--  tests/test-thread-pool.c      10
9 files changed, 86 insertions, 84 deletions
diff --git a/tests/atomic64-bench.c b/tests/atomic64-bench.c
index 121a8c14f4..e474753d34 100644
--- a/tests/atomic64-bench.c
+++ b/tests/atomic64-bench.c
@@ -56,17 +56,17 @@ static void *thread_func(void *arg)
{
struct thread_info *info = arg;
- atomic_inc(&n_ready_threads);
- while (!atomic_read(&test_start)) {
+ qatomic_inc(&n_ready_threads);
+ while (!qatomic_read(&test_start)) {
cpu_relax();
}
- while (!atomic_read(&test_stop)) {
+ while (!qatomic_read(&test_stop)) {
unsigned int index;
info->r = xorshift64star(info->r);
index = info->r & (range - 1);
- atomic_read_i64(&counts[index].i64);
+ qatomic_read_i64(&counts[index].i64);
info->accesses++;
}
return NULL;
@@ -76,13 +76,13 @@ static void run_test(void)
{
unsigned int i;
- while (atomic_read(&n_ready_threads) != n_threads) {
+ while (qatomic_read(&n_ready_threads) != n_threads) {
cpu_relax();
}
- atomic_set(&test_start, true);
+ qatomic_set(&test_start, true);
g_usleep(duration * G_USEC_PER_SEC);
- atomic_set(&test_stop, true);
+ qatomic_set(&test_stop, true);
for (i = 0; i < n_threads; i++) {
qemu_thread_join(&threads[i]);
diff --git a/tests/atomic_add-bench.c b/tests/atomic_add-bench.c
index 5666f6bbff..f05471ab45 100644
--- a/tests/atomic_add-bench.c
+++ b/tests/atomic_add-bench.c
@@ -53,12 +53,12 @@ static void *thread_func(void *arg)
{
struct thread_info *info = arg;
- atomic_inc(&n_ready_threads);
- while (!atomic_read(&test_start)) {
+ qatomic_inc(&n_ready_threads);
+ while (!qatomic_read(&test_start)) {
cpu_relax();
}
- while (!atomic_read(&test_stop)) {
+ while (!qatomic_read(&test_stop)) {
unsigned int index;
info->r = xorshift64star(info->r);
@@ -68,7 +68,7 @@ static void *thread_func(void *arg)
counts[index].val += 1;
qemu_mutex_unlock(&counts[index].lock);
} else {
- atomic_inc(&counts[index].val);
+ qatomic_inc(&counts[index].val);
}
}
return NULL;
@@ -78,13 +78,13 @@ static void run_test(void)
{
unsigned int i;
- while (atomic_read(&n_ready_threads) != n_threads) {
+ while (qatomic_read(&n_ready_threads) != n_threads) {
cpu_relax();
}
- atomic_set(&test_start, true);
+ qatomic_set(&test_start, true);
g_usleep(duration * G_USEC_PER_SEC);
- atomic_set(&test_stop, true);
+ qatomic_set(&test_stop, true);
for (i = 0; i < n_threads; i++) {
qemu_thread_join(&threads[i]);
diff --git a/tests/iothread.c b/tests/iothread.c
index d3a2ee9a01..afde12b4ef 100644
--- a/tests/iothread.c
+++ b/tests/iothread.c
@@ -74,7 +74,7 @@ static void *iothread_run(void *opaque)
qemu_cond_signal(&iothread->init_done_cond);
qemu_mutex_unlock(&iothread->init_done_lock);
- while (!atomic_read(&iothread->stopping)) {
+ while (!qatomic_read(&iothread->stopping)) {
aio_poll(iothread->ctx, true);
}
diff --git a/tests/qht-bench.c b/tests/qht-bench.c
index 362f03cb03..2e5b70ccd0 100644
--- a/tests/qht-bench.c
+++ b/tests/qht-bench.c
@@ -209,13 +209,13 @@ static void *thread_func(void *p)
rcu_register_thread();
- atomic_inc(&n_ready_threads);
- while (!atomic_read(&test_start)) {
+ qatomic_inc(&n_ready_threads);
+ while (!qatomic_read(&test_start)) {
cpu_relax();
}
rcu_read_lock();
- while (!atomic_read(&test_stop)) {
+ while (!qatomic_read(&test_stop)) {
info->seed = xorshift64star(info->seed);
info->func(info);
}
@@ -423,13 +423,13 @@ static void run_test(void)
{
int i;
- while (atomic_read(&n_ready_threads) != n_rw_threads + n_rz_threads) {
+ while (qatomic_read(&n_ready_threads) != n_rw_threads + n_rz_threads) {
cpu_relax();
}
- atomic_set(&test_start, true);
+ qatomic_set(&test_start, true);
g_usleep(duration * G_USEC_PER_SEC);
- atomic_set(&test_stop, true);
+ qatomic_set(&test_stop, true);
for (i = 0; i < n_rw_threads; i++) {
qemu_thread_join(&rw_threads[i]);
diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 732f03abda..de6f649058 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -123,7 +123,7 @@ static void *rcu_read_perf_test(void *arg)
rcu_register_thread();
*(struct rcu_reader_data **)arg = &rcu_reader;
- atomic_inc(&nthreadsrunning);
+ qatomic_inc(&nthreadsrunning);
while (goflag == GOFLAG_INIT) {
g_usleep(1000);
}
@@ -149,7 +149,7 @@ static void *rcu_update_perf_test(void *arg)
rcu_register_thread();
*(struct rcu_reader_data **)arg = &rcu_reader;
- atomic_inc(&nthreadsrunning);
+ qatomic_inc(&nthreadsrunning);
while (goflag == GOFLAG_INIT) {
g_usleep(1000);
}
@@ -172,7 +172,7 @@ static void perftestinit(void)
static void perftestrun(int nthreads, int duration, int nreaders, int nupdaters)
{
- while (atomic_read(&nthreadsrunning) < nthreads) {
+ while (qatomic_read(&nthreadsrunning) < nthreads) {
g_usleep(1000);
}
goflag = GOFLAG_RUN;
@@ -259,8 +259,8 @@ static void *rcu_read_stress_test(void *arg)
}
while (goflag == GOFLAG_RUN) {
rcu_read_lock();
- p = atomic_rcu_read(&rcu_stress_current);
- if (atomic_read(&p->mbtest) == 0) {
+ p = qatomic_rcu_read(&rcu_stress_current);
+ if (qatomic_read(&p->mbtest) == 0) {
n_mberror++;
}
rcu_read_lock();
@@ -268,7 +268,7 @@ static void *rcu_read_stress_test(void *arg)
garbage++;
}
rcu_read_unlock();
- pc = atomic_read(&p->age);
+ pc = qatomic_read(&p->age);
rcu_read_unlock();
if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
pc = RCU_STRESS_PIPE_LEN;
@@ -301,7 +301,7 @@ static void *rcu_read_stress_test(void *arg)
static void *rcu_update_stress_test(void *arg)
{
int i, rcu_stress_idx = 0;
- struct rcu_stress *cp = atomic_read(&rcu_stress_current);
+ struct rcu_stress *cp = qatomic_read(&rcu_stress_current);
rcu_register_thread();
*(struct rcu_reader_data **)arg = &rcu_reader;
@@ -319,11 +319,11 @@ static void *rcu_update_stress_test(void *arg)
p = &rcu_stress_array[rcu_stress_idx];
/* catching up with ourselves would be a bug */
assert(p != cp);
- atomic_set(&p->mbtest, 0);
+ qatomic_set(&p->mbtest, 0);
smp_mb();
- atomic_set(&p->age, 0);
- atomic_set(&p->mbtest, 1);
- atomic_rcu_set(&rcu_stress_current, p);
+ qatomic_set(&p->age, 0);
+ qatomic_set(&p->mbtest, 1);
+ qatomic_rcu_set(&rcu_stress_current, p);
cp = p;
/*
* New RCU structure is now live, update pipe counts on old
@@ -331,7 +331,7 @@ static void *rcu_update_stress_test(void *arg)
*/
for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) {
if (i != rcu_stress_idx) {
- atomic_set(&rcu_stress_array[i].age,
+ qatomic_set(&rcu_stress_array[i].age,
rcu_stress_array[i].age + 1);
}
}
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
index d3144be7e0..a555cc8835 100644
--- a/tests/test-aio-multithread.c
+++ b/tests/test-aio-multithread.c
@@ -118,16 +118,16 @@ static bool schedule_next(int n)
{
Coroutine *co;
- co = atomic_xchg(&to_schedule[n], NULL);
+ co = qatomic_xchg(&to_schedule[n], NULL);
if (!co) {
- atomic_inc(&count_retry);
+ qatomic_inc(&count_retry);
return false;
}
if (n == id) {
- atomic_inc(&count_here);
+ qatomic_inc(&count_here);
} else {
- atomic_inc(&count_other);
+ qatomic_inc(&count_other);
}
aio_co_schedule(ctx[n], co);
@@ -143,13 +143,13 @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
{
g_assert(to_schedule[id] == NULL);
- while (!atomic_mb_read(&now_stopping)) {
+ while (!qatomic_mb_read(&now_stopping)) {
int n;
n = g_test_rand_int_range(0, NUM_CONTEXTS);
schedule_next(n);
- atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+ qatomic_mb_set(&to_schedule[id], qemu_coroutine_self());
qemu_coroutine_yield();
g_assert(to_schedule[id] == NULL);
}
@@ -171,7 +171,7 @@ static void test_multi_co_schedule(int seconds)
g_usleep(seconds * 1000000);
- atomic_mb_set(&now_stopping, true);
+ qatomic_mb_set(&now_stopping, true);
for (i = 0; i < NUM_CONTEXTS; i++) {
ctx_run(i, finish_cb, NULL);
to_schedule[i] = NULL;
@@ -202,7 +202,7 @@ static CoMutex comutex;
static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
{
- while (!atomic_mb_read(&now_stopping)) {
+ while (!qatomic_mb_read(&now_stopping)) {
qemu_co_mutex_lock(&comutex);
counter++;
qemu_co_mutex_unlock(&comutex);
@@ -212,9 +212,9 @@ static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
* exits before the coroutine is woken up, causing a spurious
* assertion failure.
*/
- atomic_inc(&atomic_counter);
+ qatomic_inc(&atomic_counter);
}
- atomic_dec(&running);
+ qatomic_dec(&running);
}
static void test_multi_co_mutex(int threads, int seconds)
@@ -236,7 +236,7 @@ static void test_multi_co_mutex(int threads, int seconds)
g_usleep(seconds * 1000000);
- atomic_mb_set(&now_stopping, true);
+ qatomic_mb_set(&now_stopping, true);
while (running > 0) {
g_usleep(100000);
}
@@ -296,9 +296,9 @@ static void mcs_mutex_lock(void)
nodes[id].next = -1;
nodes[id].locked = 1;
- prev = atomic_xchg(&mutex_head, id);
+ prev = qatomic_xchg(&mutex_head, id);
if (prev != -1) {
- atomic_set(&nodes[prev].next, id);
+ qatomic_set(&nodes[prev].next, id);
qemu_futex_wait(&nodes[id].locked, 1);
}
}
@@ -306,13 +306,13 @@ static void mcs_mutex_lock(void)
static void mcs_mutex_unlock(void)
{
int next;
- if (atomic_read(&nodes[id].next) == -1) {
- if (atomic_read(&mutex_head) == id &&
- atomic_cmpxchg(&mutex_head, id, -1) == id) {
+ if (qatomic_read(&nodes[id].next) == -1) {
+ if (qatomic_read(&mutex_head) == id &&
+ qatomic_cmpxchg(&mutex_head, id, -1) == id) {
/* Last item in the list, exit. */
return;
}
- while (atomic_read(&nodes[id].next) == -1) {
+ while (qatomic_read(&nodes[id].next) == -1) {
/* mcs_mutex_lock did the xchg, but has not updated
* nodes[prev].next yet.
*/
@@ -320,20 +320,20 @@ static void mcs_mutex_unlock(void)
}
/* Wake up the next in line. */
- next = atomic_read(&nodes[id].next);
+ next = qatomic_read(&nodes[id].next);
nodes[next].locked = 0;
qemu_futex_wake(&nodes[next].locked, 1);
}
static void test_multi_fair_mutex_entry(void *opaque)
{
- while (!atomic_mb_read(&now_stopping)) {
+ while (!qatomic_mb_read(&now_stopping)) {
mcs_mutex_lock();
counter++;
mcs_mutex_unlock();
- atomic_inc(&atomic_counter);
+ qatomic_inc(&atomic_counter);
}
- atomic_dec(&running);
+ qatomic_dec(&running);
}
static void test_multi_fair_mutex(int threads, int seconds)
@@ -355,7 +355,7 @@ static void test_multi_fair_mutex(int threads, int seconds)
g_usleep(seconds * 1000000);
- atomic_mb_set(&now_stopping, true);
+ qatomic_mb_set(&now_stopping, true);
while (running > 0) {
g_usleep(100000);
}
@@ -383,13 +383,13 @@ static QemuMutex mutex;
static void test_multi_mutex_entry(void *opaque)
{
- while (!atomic_mb_read(&now_stopping)) {
+ while (!qatomic_mb_read(&now_stopping)) {
qemu_mutex_lock(&mutex);
counter++;
qemu_mutex_unlock(&mutex);
- atomic_inc(&atomic_counter);
+ qatomic_inc(&atomic_counter);
}
- atomic_dec(&running);
+ qatomic_dec(&running);
}
static void test_multi_mutex(int threads, int seconds)
@@ -411,7 +411,7 @@ static void test_multi_mutex(int threads, int seconds)
g_usleep(seconds * 1000000);
- atomic_mb_set(&now_stopping, true);
+ qatomic_mb_set(&now_stopping, true);
while (running > 0) {
g_usleep(100000);
}
diff --git a/tests/test-logging.c b/tests/test-logging.c
index 8b1522cfed..ccb819f193 100644
--- a/tests/test-logging.c
+++ b/tests/test-logging.c
@@ -133,7 +133,7 @@ static void test_logfile_write(gconstpointer data)
*/
qemu_set_log_filename(file_path, &error_abort);
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
orig_fd = logfile->fd;
g_assert(logfile && logfile->fd);
fprintf(logfile->fd, "%s 1st write to file\n", __func__);
@@ -141,7 +141,7 @@ static void test_logfile_write(gconstpointer data)
/* Change the logfile and ensure that the handle is still valid. */
qemu_set_log_filename(file_path1, &error_abort);
- logfile2 = atomic_rcu_read(&qemu_logfile);
+ logfile2 = qatomic_rcu_read(&qemu_logfile);
g_assert(logfile->fd == orig_fd);
g_assert(logfile2->fd != logfile->fd);
fprintf(logfile->fd, "%s 2nd write to file\n", __func__);
diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
index 92be51ec50..49641e1936 100644
--- a/tests/test-rcu-list.c
+++ b/tests/test-rcu-list.c
@@ -106,7 +106,7 @@ static void reclaim_list_el(struct rcu_head *prcu)
struct list_element *el = container_of(prcu, struct list_element, rcu);
g_free(el);
/* Accessed only from call_rcu thread. */
- atomic_set_i64(&n_reclaims, n_reclaims + 1);
+ qatomic_set_i64(&n_reclaims, n_reclaims + 1);
}
#if TEST_LIST_TYPE == 1
@@ -172,16 +172,16 @@ static void *rcu_q_reader(void *arg)
rcu_register_thread();
*(struct rcu_reader_data **)arg = &rcu_reader;
- atomic_inc(&nthreadsrunning);
- while (atomic_read(&goflag) == GOFLAG_INIT) {
+ qatomic_inc(&nthreadsrunning);
+ while (qatomic_read(&goflag) == GOFLAG_INIT) {
g_usleep(1000);
}
- while (atomic_read(&goflag) == GOFLAG_RUN) {
+ while (qatomic_read(&goflag) == GOFLAG_RUN) {
rcu_read_lock();
TEST_LIST_FOREACH_RCU(el, &Q_list_head, entry) {
n_reads_local++;
- if (atomic_read(&goflag) == GOFLAG_STOP) {
+ if (qatomic_read(&goflag) == GOFLAG_STOP) {
break;
}
}
@@ -207,12 +207,12 @@ static void *rcu_q_updater(void *arg)
struct list_element *el, *prev_el;
*(struct rcu_reader_data **)arg = &rcu_reader;
- atomic_inc(&nthreadsrunning);
- while (atomic_read(&goflag) == GOFLAG_INIT) {
+ qatomic_inc(&nthreadsrunning);
+ while (qatomic_read(&goflag) == GOFLAG_INIT) {
g_usleep(1000);
}
- while (atomic_read(&goflag) == GOFLAG_RUN) {
+ while (qatomic_read(&goflag) == GOFLAG_RUN) {
target_el = select_random_el(RCU_Q_LEN);
j = 0;
/* FOREACH_RCU could work here but let's use both macros */
@@ -226,7 +226,7 @@ static void *rcu_q_updater(void *arg)
break;
}
}
- if (atomic_read(&goflag) == GOFLAG_STOP) {
+ if (qatomic_read(&goflag) == GOFLAG_STOP) {
break;
}
target_el = select_random_el(RCU_Q_LEN);
@@ -248,7 +248,7 @@ static void *rcu_q_updater(void *arg)
qemu_mutex_lock(&counts_mutex);
n_nodes += n_nodes_local;
n_updates += n_updates_local;
- atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
+ qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
qemu_mutex_unlock(&counts_mutex);
return NULL;
}
@@ -271,13 +271,13 @@ static void rcu_qtest_init(void)
static void rcu_qtest_run(int duration, int nreaders)
{
int nthreads = nreaders + 1;
- while (atomic_read(&nthreadsrunning) < nthreads) {
+ while (qatomic_read(&nthreadsrunning) < nthreads) {
g_usleep(1000);
}
- atomic_set(&goflag, GOFLAG_RUN);
+ qatomic_set(&goflag, GOFLAG_RUN);
sleep(duration);
- atomic_set(&goflag, GOFLAG_STOP);
+ qatomic_set(&goflag, GOFLAG_STOP);
wait_all_threads();
}
@@ -302,21 +302,23 @@ static void rcu_qtest(const char *test, int duration, int nreaders)
n_removed_local++;
}
qemu_mutex_lock(&counts_mutex);
- atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
+ qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
qemu_mutex_unlock(&counts_mutex);
synchronize_rcu();
- while (atomic_read_i64(&n_nodes_removed) > atomic_read_i64(&n_reclaims)) {
+ while (qatomic_read_i64(&n_nodes_removed) >
+ qatomic_read_i64(&n_reclaims)) {
g_usleep(100);
synchronize_rcu();
}
if (g_test_in_charge) {
- g_assert_cmpint(atomic_read_i64(&n_nodes_removed), ==,
- atomic_read_i64(&n_reclaims));
+ g_assert_cmpint(qatomic_read_i64(&n_nodes_removed), ==,
+ qatomic_read_i64(&n_reclaims));
} else {
printf("%s: %d readers; 1 updater; nodes read: " \
"%lld, nodes removed: %"PRIi64"; nodes reclaimed: %"PRIi64"\n",
test, nthreadsrunning - 1, n_reads,
- atomic_read_i64(&n_nodes_removed), atomic_read_i64(&n_reclaims));
+ qatomic_read_i64(&n_nodes_removed),
+ qatomic_read_i64(&n_reclaims));
exit(0);
}
}
diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
index 0b675923f6..70dc6314a1 100644
--- a/tests/test-thread-pool.c
+++ b/tests/test-thread-pool.c
@@ -21,15 +21,15 @@ typedef struct {
static int worker_cb(void *opaque)
{
WorkerTestData *data = opaque;
- return atomic_fetch_inc(&data->n);
+ return qatomic_fetch_inc(&data->n);
}
static int long_cb(void *opaque)
{
WorkerTestData *data = opaque;
- if (atomic_cmpxchg(&data->n, 0, 1) == 0) {
+ if (qatomic_cmpxchg(&data->n, 0, 1) == 0) {
g_usleep(2000000);
- atomic_or(&data->n, 2);
+ qatomic_or(&data->n, 2);
}
return 0;
}
@@ -172,7 +172,7 @@ static void do_test_cancel(bool sync)
/* Cancel the jobs that haven't been started yet. */
num_canceled = 0;
for (i = 0; i < 100; i++) {
- if (atomic_cmpxchg(&data[i].n, 0, 4) == 0) {
+ if (qatomic_cmpxchg(&data[i].n, 0, 4) == 0) {
data[i].ret = -ECANCELED;
if (sync) {
bdrv_aio_cancel(data[i].aiocb);
@@ -186,7 +186,7 @@ static void do_test_cancel(bool sync)
g_assert_cmpint(num_canceled, <, 100);
for (i = 0; i < 100; i++) {
- if (data[i].aiocb && atomic_read(&data[i].n) < 4) {
+ if (data[i].aiocb && qatomic_read(&data[i].n) < 4) {
if (sync) {
/* Canceling the others will be a blocking operation. */
bdrv_aio_cancel(data[i].aiocb);