[v7,4/6] KVM: x86: Take the pvclock sync lock behind the tsc_write_lock

Message ID 20210816001130.3059564-5-oupton@google.com (mailing list archive)
State New, archived
Series KVM: x86: Add idempotent controls for migrating system counter state

Commit Message

Oliver Upton Aug. 16, 2021, 12:11 a.m. UTC
A later change requires that the pvclock sync lock be taken while
holding the tsc_write_lock. Change the locking in kvm_synchronize_tsc()
to match that requirement, isolating the locking change in its own
commit.
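
In condensed form, the nesting after this change is (a sketch only;
the surrounding computation is elided and all identifiers are as in
the patch below):

    raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
    /* ... compute offset and whether this write matches cur_tsc_* ... */
    kvm_vcpu_write_tsc_offset(vcpu, offset);

    /*
     * Now nested inside the tsc_write_lock critical section. Note this
     * reuses flags from the outer lock; see the review below.
     */
    spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
    /* ... update cur_tsc_* generation tracking ... */
    kvm_track_tsc_matching(vcpu);
    spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);

    raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);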

Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Oliver Upton <oupton@google.com>
---
 Documentation/virt/kvm/locking.rst | 11 +++++++++++
 arch/x86/kvm/x86.c                 |  2 +-
 2 files changed, 12 insertions(+), 1 deletion(-)

Comments

Sean Christopherson Sept. 2, 2021, 7:22 p.m. UTC | #1
On Mon, Aug 16, 2021, Oliver Upton wrote:
> A later change requires that the pvclock sync lock be taken while
> holding the tsc_write_lock. Change the locking in kvm_synchronize_tsc()
> to match that requirement, isolating the locking change in its own
> commit.
> 
> Cc: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Oliver Upton <oupton@google.com>
> ---
>  Documentation/virt/kvm/locking.rst | 11 +++++++++++
>  arch/x86/kvm/x86.c                 |  2 +-
>  2 files changed, 12 insertions(+), 1 deletion(-)
> 
> diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
> index 8138201efb09..0bf346adac2a 100644
> --- a/Documentation/virt/kvm/locking.rst
> +++ b/Documentation/virt/kvm/locking.rst
> @@ -36,6 +36,9 @@ On x86:
>    holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
>    there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
>  
> +- kvm->arch.tsc_write_lock is taken outside
> +  kvm->arch.pvclock_gtod_sync_lock
> +
>  Everything else is a leaf: no other lock is taken inside the critical
>  sections.
>  
> @@ -222,6 +225,14 @@ time it will be set using the Dirty tracking mechanism described above.
>  :Comment:	'raw' because hardware enabling/disabling must be atomic /wrt
>  		migration.
>  
> +:Name:		kvm_arch::pvclock_gtod_sync_lock
> +:Type:		raw_spinlock_t
> +:Arch:		x86
> +:Protects:	kvm_arch::{cur_tsc_generation,cur_tsc_nsec,cur_tsc_write,
> +			cur_tsc_offset,nr_vcpus_matched_tsc}
> +:Comment:	'raw' because updating the kvm master clock must not be
> +		preempted.
> +
>  :Name:		kvm_arch::tsc_write_lock
>  :Type:		raw_spinlock
>  :Arch:		x86
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b1e9a4885be6..f1434cd388b9 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2533,7 +2533,6 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
>  	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
>  
>  	kvm_vcpu_write_tsc_offset(vcpu, offset);
> -	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
>  
>  	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
>  	if (!matched) {
> @@ -2544,6 +2543,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
>  
>  	kvm_track_tsc_matching(vcpu);
>  	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);

Drop the irqsave/irqrestore in this patch instead of doing so while refactoring
the code in the next patch.
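
Presumably along these lines (an untested sketch of the suggestion):
interrupts are already disabled by the outer raw_spin_lock_irqsave(),
so the inner lock can be taken with a plain spin_lock(), which also
avoids clobbering the flags saved by the outer lock:

    raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
    /* ... */
    kvm_vcpu_write_tsc_offset(vcpu, offset);

    /* IRQs are already off; don't save/restore flags a second time. */
    spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
    if (!matched) {
        /* ... */
    }
    kvm_track_tsc_matching(vcpu);
    spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);

    raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);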

> +	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
>  }
>  
>  static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
> -- 
> 2.33.0.rc1.237.g0d66db33f3-goog
>
Patch

diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 8138201efb09..0bf346adac2a 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -36,6 +36,9 @@ On x86:
   holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
   there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
 
+- kvm->arch.tsc_write_lock is taken outside
+  kvm->arch.pvclock_gtod_sync_lock
+
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
 
@@ -222,6 +225,14 @@ time it will be set using the Dirty tracking mechanism described above.
 :Comment:	'raw' because hardware enabling/disabling must be atomic /wrt
 		migration.
 
+:Name:		kvm_arch::pvclock_gtod_sync_lock
+:Type:		raw_spinlock_t
+:Arch:		x86
+:Protects:	kvm_arch::{cur_tsc_generation,cur_tsc_nsec,cur_tsc_write,
+			cur_tsc_offset,nr_vcpus_matched_tsc}
+:Comment:	'raw' because updating the kvm master clock must not be
+		preempted.
+
 :Name:		kvm_arch::tsc_write_lock
 :Type:		raw_spinlock
 :Arch:		x86
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b1e9a4885be6..f1434cd388b9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2533,7 +2533,6 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
-	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
 	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
 	if (!matched) {
@@ -2544,6 +2543,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 
 	kvm_track_tsc_matching(vcpu);
 	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,