
[PULL,22/69] rcutorture: remove synchronize_rcu from readers

Message ID 20180313224719.4954-23-pbonzini@redhat.com (mailing list archive)
State New, archived

Commit Message

Paolo Bonzini March 13, 2018, 10:46 p.m. UTC
Calling synchronize_rcu in the read-side loop gives much worse numbers for
readers, especially if synchronize_rcu is made more expensive, as is the
case with --enable-membarrier.  Before:

   $ tests/rcutorture 10 stress 10
   n_reads: 98304  n_updates: 529  n_mberror: 0
   rcu_stress_count: 98302 2 0 0 0 0 0 0 0 0 0

After:

   $ tests/rcutorture 10 stress 10
   n_reads: 165158482  n_updates: 429  n_mberror: 0
   rcu_stress_count: 165154364 4118 0 0 0 0 0 0 0 0 0
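
For context, the read-side hot path touched by this patch looks roughly
like the sketch below.  This is a minimal reconstruction, not the actual
tests/rcutorture.c code: it assumes QEMU's RCU API from "qemu/rcu.h" and
atomic_rcu_read() from "qemu/atomic.h", and the shared pointer, stop flag
and item type are illustrative.  The point is the periodic
synchronize_rcu() in the loop, which makes each reader block for a full
grace period every 0x1000 iterations, so the "Before" n_reads tracks
grace-period latency rather than read-side throughput.

   /*
    * Minimal sketch of a reader loop; identifiers other than
    * rcu_read_lock/unlock, synchronize_rcu, rcu_register_thread,
    * rcu_unregister_thread and atomic_rcu_read are illustrative,
    * not taken from the patch.
    */
   #include "qemu/osdep.h"
   #include "qemu/rcu.h"
   #include "qemu/atomic.h"

   struct item {
       int value;
   };

   static struct item *current_item;     /* illustrative shared pointer */
   static volatile bool stop_requested;  /* illustrative stop flag */

   static void *reader_thread(void *arg)
   {
       long long n_reads_local = 0;
       int itercnt = 0;

       rcu_register_thread();
       while (!stop_requested) {
           struct item *p;

           rcu_read_lock();
           p = atomic_rcu_read(&current_item);
           if (p) {
               (void)p->value;           /* consume the protected data */
           }
           rcu_read_unlock();
           n_reads_local++;

           /*
            * The pattern this patch deletes: blocking for a grace period
            * inside the reader caps n_reads at roughly 0x1000 times the
            * number of synchronize_rcu() calls the thread can complete,
            * which gets much worse when synchronize_rcu() is built on
            * membarrier.
            */
           if ((++itercnt % 0x1000) == 0) {
               synchronize_rcu();
           }
       }
       rcu_unregister_thread();
       return NULL;
   }

With the synchronize_rcu() call removed, n_reads reflects how fast the
readers actually run, which is what the numbers above show.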

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 tests/rcutorture.c | 4 ----
 1 file changed, 4 deletions(-)

Patch

diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 4002ecf123..49311c82ea 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -238,7 +238,6 @@  long long rcu_stress_count[RCU_STRESS_PIPE_LEN + 1];
 static void *rcu_read_stress_test(void *arg)
 {
     int i;
-    int itercnt = 0;
     struct rcu_stress *p;
     int pc;
     long long n_reads_local = 0;
@@ -269,9 +268,6 @@  static void *rcu_read_stress_test(void *arg)
         }
         rcu_stress_local[pc]++;
         n_reads_local++;
-        if ((++itercnt % 0x1000) == 0) {
-            synchronize_rcu();
-        }
     }
     qemu_mutex_lock(&counts_mutex);
     n_reads += n_reads_local;