[2/8] KVM: pfncache: add a mark-dirty helper

Message ID 20230914084946.200043-3-paul@xen.org (mailing list archive)
State New, archived
Series KVM: xen: update shared_info and vcpu_info handling

Commit Message

Paul Durrant Sept. 14, 2023, 8:49 a.m. UTC
From: Paul Durrant <pdurrant@amazon.com>

At the moment, pages are marked dirty by open-coded calls to
mark_page_dirty_in_slot(), directly dereferencing the gpa and memslot
from the cache. After a subsequent patch these may not always be set,
so add a helper now so that callers are protected from needing to know
about this detail.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: x86@kernel.org
---
 arch/x86/kvm/x86.c       |  2 +-
 arch/x86/kvm/xen.c       | 13 ++++++-------
 include/linux/kvm_host.h |  7 +++++++
 virt/kvm/pfncache.c      |  6 ++++++
 4 files changed, 20 insertions(+), 8 deletions(-)

Comments

David Woodhouse Sept. 14, 2023, 9:21 a.m. UTC | #1
On Thu, 2023-09-14 at 08:49 +0000, Paul Durrant wrote:
> --- a/arch/x86/kvm/xen.c
> +++ b/arch/x86/kvm/xen.c
> @@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
>                 smp_wmb();
>         }
>  
> -       if (user_len2)
> +       if (user_len2) {
> +               kvm_gpc_mark_dirty(gpc2);
>                 read_unlock(&gpc2->lock);
> +       }
>  
> +       kvm_gpc_mark_dirty(gpc1);
>         read_unlock_irqrestore(&gpc1->lock, flags);
> -
> -       mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
> -       if (user_len2)
> -               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
>  }
>  
>  void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)

ISTR there was a reason why the mark_page_dirty_in_slot() was called
*after* unlocking. Although now I say it, that seems wrong... is that
because the spinlock is only protecting the uHVA→kHVA mapping, while
the memslot/gpa are going to remain valid even after unlock, because
those are protected by sRCU?
Paul Durrant Sept. 14, 2023, 9:34 a.m. UTC | #2
On 14/09/2023 10:21, David Woodhouse wrote:
> On Thu, 2023-09-14 at 08:49 +0000, Paul Durrant wrote:
>> --- a/arch/x86/kvm/xen.c
>> +++ b/arch/x86/kvm/xen.c
>> @@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
>>                  smp_wmb();
>>          }
>>   
>> -       if (user_len2)
>> +       if (user_len2) {
>> +               kvm_gpc_mark_dirty(gpc2);
>>                  read_unlock(&gpc2->lock);
>> +       }
>>   
>> +       kvm_gpc_mark_dirty(gpc1);
>>          read_unlock_irqrestore(&gpc1->lock, flags);
>> -
>> -       mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
>> -       if (user_len2)
>> -               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
>>   }
>>   
>>   void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
> 
> ISTR there was a reason why the mark_page_dirty_in_slot() was called
> *after* unlocking. Although now I say it, that seems wrong... is that
> because the spinlock is only protecting the uHVA→kHVA mapping, while
> the memslot/gpa are going to remain valid even after unlock, because
> those are protected by sRCU?

Without the lock you could see an inconsistent GPA and memslot so I 
think you could theoretically calculate a bogus rel_gfn and walk off the 
end of the dirty bitmap. Hence moving the call inside the lock while I 
was in the neighbourhood seemed like a good idea. I could call it out in 
the commit comment if you'd like.

   Paul
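
For context, the hazard described above stems from how
mark_page_dirty_in_slot() derives the dirty bitmap index from the
memslot and gfn. A simplified sketch of that logic (paraphrased from
virt/kvm/kvm_main.c; dirty-ring handling and sanity checks elided):

	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/*
		 * If gpa and memslot are sampled without the gpc lock, a
		 * concurrent refresh can pair a stale gfn with a new
		 * memslot, so rel_gfn may index past the end of the
		 * dirty bitmap.
		 */
		set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}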
David Woodhouse Sept. 14, 2023, 12:39 p.m. UTC | #3
On Thu, 2023-09-14 at 11:34 +0200, Paul Durrant wrote:
> On 14/09/2023 10:21, David Woodhouse wrote:
> > On Thu, 2023-09-14 at 08:49 +0000, Paul Durrant wrote:
> > > --- a/arch/x86/kvm/xen.c
> > > +++ b/arch/x86/kvm/xen.c
> > > @@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
> > >                  smp_wmb();
> > >          }
> > >   
> > > -       if (user_len2)
> > > +       if (user_len2) {
> > > +               kvm_gpc_mark_dirty(gpc2);
> > >                  read_unlock(&gpc2->lock);
> > > +       }
> > >   
> > > +       kvm_gpc_mark_dirty(gpc1);
> > >          read_unlock_irqrestore(&gpc1->lock, flags);
> > > -
> > > -       mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
> > > -       if (user_len2)
> > > -               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
> > >   }
> > >   
> > >   void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
> > 
> > ISTR there was a reason why the mark_page_dirty_in_slot() was called
> > *after* unlocking. Although now I say it, that seems wrong... is that
> > because the spinlock is only protecting the uHVA→kHVA mapping, while
> > the memslot/gpa are going to remain valid even after unlock, because
> > those are protected by sRCU?
> 
> Without the lock you could see an inconsistent GPA and memslot so I 
> think you could theoretically calculate a bogus rel_gfn and walk off the 
> end of the dirty bitmap. Hence moving the call inside the lock while I 
> was in the neighbourhood seemed like a good idea. I could call it out in 
> the commit comment if you'd like.

Yeah, I can't see a reason why it needs to be outside the lock, and as
you note, there really is a reason why it should be *inside*. Whatever
reason there was, it either disappeared in the revisions of the gpc
patch set or it was stupidity on my part in the first place.

So yeah, let it move inside the lock, call that out in the commit
message (I did note some of the other commits could have used a 'No
functional change intended' too, FWIW), and

Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>

Thanks.
Paul Durrant Sept. 14, 2023, 1:07 p.m. UTC | #4
On 14/09/2023 13:39, David Woodhouse wrote:
> On Thu, 2023-09-14 at 11:34 +0200, Paul Durrant wrote:
>> On 14/09/2023 10:21, David Woodhouse wrote:
>>> On Thu, 2023-09-14 at 08:49 +0000, Paul Durrant wrote:
>>>> --- a/arch/x86/kvm/xen.c
>>>> +++ b/arch/x86/kvm/xen.c
>>>> @@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
>>>>                   smp_wmb();
>>>>           }
>>>>    
>>>> -       if (user_len2)
>>>> +       if (user_len2) {
>>>> +               kvm_gpc_mark_dirty(gpc2);
>>>>                   read_unlock(&gpc2->lock);
>>>> +       }
>>>>    
>>>> +       kvm_gpc_mark_dirty(gpc1);
>>>>           read_unlock_irqrestore(&gpc1->lock, flags);
>>>> -
>>>> -       mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
>>>> -       if (user_len2)
>>>> -               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
>>>>    }
>>>>    
>>>>    void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
>>>
>>> ISTR there was a reason why the mark_page_dirty_in_slot() was called
>>> *after* unlocking. Although now I say it, that seems wrong... is that
>>> because the spinlock is only protecting the uHVA→kHVA mapping, while
>>> the memslot/gpa are going to remain valid even after unlock, because
>>> those are protected by sRCU?
>>
>> Without the lock you could see an inconsistent GPA and memslot so I
>> think you could theoretically calculate a bogus rel_gfn and walk off the
>> end of the dirty bitmap. Hence moving the call inside the lock while I
>> was in the neighbourhood seemed like a good idea. I could call it out in
>> the commit comment if you'd like.
> 
> Yeah, I can't see a reason why it needs to be outside the lock, and as
> you note, there really is a reason why it should be *inside*. Whatever
> reason there was, it either disappeared in the revisions of the gpc
> patch set or it was stupidity on my part in the first place.
> 
> So yeah, let it move inside the lock, call that out in the commit
> message (I did note some of the other commits could have used a 'No
> functional change intended' too, FWIW), and

Ack. Will do.

> 
> Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
> 

Thanks.

   Paul

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c9c81e82e65..d669a8801265 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3137,7 +3137,7 @@  static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 
 	guest_hv_clock->version = ++vcpu->hv_clock.version;
 
-	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+	kvm_gpc_mark_dirty(gpc);
 	read_unlock_irqrestore(&gpc->lock, flags);
 
 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 40edf4d1974c..33fddd29824b 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -430,14 +430,13 @@  static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		smp_wmb();
 	}
 
-	if (user_len2)
+	if (user_len2) {
+		kvm_gpc_mark_dirty(gpc2);
 		read_unlock(&gpc2->lock);
+	}
 
+	kvm_gpc_mark_dirty(gpc1);
 	read_unlock_irqrestore(&gpc1->lock, flags);
-
-	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
-	if (user_len2)
-		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
 }
 
 void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -543,13 +542,13 @@  void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 			     : "0" (evtchn_pending_sel32));
 		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
 	}
+
+	kvm_gpc_mark_dirty(gpc);
 	read_unlock_irqrestore(&gpc->lock, flags);
 
 	/* For the per-vCPU lapic vector, deliver it as MSI. */
 	if (v->arch.xen.upcall_vector)
 		kvm_xen_inject_vcpu_vector(v);
-
-	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index fb6c6109fdca..c71e8fbccaaf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1367,6 +1367,13 @@  int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
  */
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
 
+/**
+ * kvm_gpc_mark_dirty - mark a cached page as dirty.
+ *
+ * @gpc:	   struct gfn_to_pfn_cache object.
+ */
+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 0f36acdf577f..b68ed7fa56a2 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -386,6 +386,12 @@  int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
+{
+	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_mark_dirty);
+
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
 	struct kvm *kvm = gpc->kvm;
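
For reference, a hedged sketch of the caller pattern the new helper
enables, mirroring the xen.c hunks above: the dirty mark is issued while
the gpc read lock still guarantees that ->gpa and ->memslot are a
consistent pair. The surrounding details (the data pointer and check
length) are illustrative, not part of the patch.

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, sizeof(*data))) {
		read_unlock_irqrestore(&gpc->lock, flags);
		return;
	}

	/* ... write guest-visible state through gpc->khva ... */
	smp_wmb();

	/* gpa and memslot cannot change while the read lock is held. */
	kvm_gpc_mark_dirty(gpc);
	read_unlock_irqrestore(&gpc->lock, flags);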