
[04/10] seqlock: rename write_lock/unlock to write_begin/end

Message ID 1459834253-8291-5-git-send-email-cota@braap.org (mailing list archive)
State New, archived

Commit Message

Emilio Cota April 5, 2016, 5:30 a.m. UTC
It is a more appropriate name, now that the mutex embedded
in the seqlock is gone.

Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 cpus.c                 | 28 ++++++++++++++--------------
 include/qemu/seqlock.h |  4 ++--
 2 files changed, 16 insertions(+), 16 deletions(-)
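
For context, a minimal sketch of the writer-side pattern after this rename (hypothetical variable names, not taken from the patch): writers now serialize against each other with their own lock, e.g. a QemuMutex, and only bracket the update with seqlock_write_begin()/seqlock_write_end() to exclude concurrent readers.

    #include "qemu/seqlock.h"   /* seqlock_write_begin/end */
    #include "qemu/thread.h"    /* QemuMutex */

    /* Hypothetical example state, for illustration only. */
    static QemuSeqLock clock_seqlock;
    static QemuMutex   clock_lock;
    static int64_t     clock_offset;

    static void update_clock_offset(int64_t delta)
    {
        qemu_mutex_lock(&clock_lock);          /* writer vs. writer */
        seqlock_write_begin(&clock_seqlock);   /* writer vs. readers */
        clock_offset += delta;
        seqlock_write_end(&clock_seqlock);
        qemu_mutex_unlock(&clock_lock);
    }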

Comments

Alex Bennée April 6, 2016, 8:42 a.m. UTC | #1
Emilio G. Cota <cota@braap.org> writes:

> It is a more appropriate name, now that the mutex embedded
> in the seqlock is gone.
>
> Signed-off-by: Emilio G. Cota <cota@braap.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

--
Alex Bennée

Patch

diff --git a/cpus.c b/cpus.c
index 7f550b9..7dad2e6 100644
--- a/cpus.c
+++ b/cpus.c
@@ -247,13 +247,13 @@  int64_t cpu_get_clock(void)
 void cpu_enable_ticks(void)
 {
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
-    seqlock_write_lock(&timers_state.vm_clock_seqlock);
+    seqlock_write_begin(&timers_state.vm_clock_seqlock);
     if (!timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
         timers_state.cpu_clock_offset -= get_clock();
         timers_state.cpu_ticks_enabled = 1;
     }
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+    seqlock_write_end(&timers_state.vm_clock_seqlock);
 }
 
 /* disable cpu_get_ticks() : the clock is stopped. You must not call
@@ -263,13 +263,13 @@  void cpu_enable_ticks(void)
 void cpu_disable_ticks(void)
 {
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
-    seqlock_write_lock(&timers_state.vm_clock_seqlock);
+    seqlock_write_begin(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset += cpu_get_host_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+    seqlock_write_end(&timers_state.vm_clock_seqlock);
 }
 
 /* Correlation between real and virtual time is always going to be
@@ -292,7 +292,7 @@  static void icount_adjust(void)
         return;
     }
 
-    seqlock_write_lock(&timers_state.vm_clock_seqlock);
+    seqlock_write_begin(&timers_state.vm_clock_seqlock);
     cur_time = cpu_get_clock_locked();
     cur_icount = cpu_get_icount_locked();
 
@@ -313,7 +313,7 @@  static void icount_adjust(void)
     last_delta = delta;
     timers_state.qemu_icount_bias = cur_icount
                               - (timers_state.qemu_icount << icount_time_shift);
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+    seqlock_write_end(&timers_state.vm_clock_seqlock);
 }
 
 static void icount_adjust_rt(void *opaque)
@@ -345,7 +345,7 @@  static void icount_warp_rt(void)
         return;
     }
 
-    seqlock_write_lock(&timers_state.vm_clock_seqlock);
+    seqlock_write_begin(&timers_state.vm_clock_seqlock);
     if (runstate_is_running()) {
         int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                      cpu_get_clock_locked());
@@ -364,7 +364,7 @@  static void icount_warp_rt(void)
         timers_state.qemu_icount_bias += warp_delta;
     }
     vm_clock_warp_start = -1;
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+    seqlock_write_end(&timers_state.vm_clock_seqlock);
 
     if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -389,9 +389,9 @@  void qtest_clock_warp(int64_t dest)
         int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
         int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
 
-        seqlock_write_lock(&timers_state.vm_clock_seqlock);
+        seqlock_write_begin(&timers_state.vm_clock_seqlock);
         timers_state.qemu_icount_bias += warp;
-        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+        seqlock_write_end(&timers_state.vm_clock_seqlock);
 
         qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
         timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
@@ -458,9 +458,9 @@  void qemu_start_warp_timer(void)
              * It is useful when we want a deterministic execution time,
              * isolated from host latencies.
              */
-            seqlock_write_lock(&timers_state.vm_clock_seqlock);
+            seqlock_write_begin(&timers_state.vm_clock_seqlock);
             timers_state.qemu_icount_bias += deadline;
-            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+            seqlock_write_end(&timers_state.vm_clock_seqlock);
             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
         } else {
             /*
@@ -471,11 +471,11 @@  void qemu_start_warp_timer(void)
              * you will not be sending network packets continuously instead of
              * every 100ms.
              */
-            seqlock_write_lock(&timers_state.vm_clock_seqlock);
+            seqlock_write_begin(&timers_state.vm_clock_seqlock);
             if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
                 vm_clock_warp_start = clock;
             }
-            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+            seqlock_write_end(&timers_state.vm_clock_seqlock);
             timer_mod_anticipate(icount_warp_timer, clock + deadline);
         }
     } else if (deadline == 0) {
diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h
index e673482..4dfc055 100644
--- a/include/qemu/seqlock.h
+++ b/include/qemu/seqlock.h
@@ -28,7 +28,7 @@  static inline void seqlock_init(QemuSeqLock *sl)
 }
 
 /* Lock out other writers and update the count.  */
-static inline void seqlock_write_lock(QemuSeqLock *sl)
+static inline void seqlock_write_begin(QemuSeqLock *sl)
 {
     ++sl->sequence;
 
@@ -36,7 +36,7 @@  static inline void seqlock_write_lock(QemuSeqLock *sl)
     smp_wmb();
 }
 
-static inline void seqlock_write_unlock(QemuSeqLock *sl)
+static inline void seqlock_write_end(QemuSeqLock *sl)
 {
     /* Write other fields before finalizing sequence.  */
     smp_wmb();
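
The read side is unaffected by the rename. For reference, a minimal sketch of the matching reader loop, assuming the pre-existing seqlock_read_begin()/seqlock_read_retry() helpers and reusing the hypothetical clock_seqlock/clock_offset from the sketch above:

    static int64_t read_clock_offset(void)
    {
        unsigned start;
        int64_t val;

        /* Retry if a writer raced with, or was in progress during, the read. */
        do {
            start = seqlock_read_begin(&clock_seqlock);
            val = clock_offset;
        } while (seqlock_read_retry(&clock_seqlock, start));

        return val;
    }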