[7/8] KVM: xen: prepare for using 'default' vcpu_info

Message ID 20230914084946.200043-8-paul@xen.org (mailing list archive)
State New, archived
Series KVM: xen: update shared_info and vcpu_info handling

Commit Message

Paul Durrant Sept. 14, 2023, 8:49 a.m. UTC
From: Paul Durrant <pdurrant@amazon.com>

The shared_info page contains an array of 32 vcpu_info structures
which may be used by guests (with fewer than 32 vCPUs) if they don't
explicitly register vcpu_info structures in their own memory, using
a VCPUOP_register_vcpu_info hypercall.

Currently we rely on the VMM always registering vcpu_info structures,
even if the guest doesn't make that hypercall. This is somewhat bogus
because, as stated in the comment of a previous commit, the
shared_info page is not guest memory.

Prepare to automatically use the vcpu_info embedded in shared_info by
default, by adding a get_vcpu_info_cache() helper function. This
function also passes back an offset to be added to the cached khva.
The offset is currently always zero, since we still rely on the VMM
behaviour described above; a subsequent patch will make proper use of
it.
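
For reference, the layout in question (a condensed sketch based on
the Xen public headers, xen/include/public/xen.h; most fields
elided) is:

  #define XEN_LEGACY_MAX_VCPUS 32

  struct shared_info {
          struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
          /* ... event channel state, wallclock time, etc. ... */
  };

A guest vCPU with an id below 32 that never issues
VCPUOP_register_vcpu_info could then plausibly be served from the
shared_info cache at

  offsetof(struct shared_info, vcpu_info[vcpu_id])

which is the kind of value the offset returned by
get_vcpu_info_cache() is intended to carry; the actual wiring is
left to the subsequent patch, so this is illustrative only.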

NOTE: To avoid leaking detail of the vcpu_info duality into the main
      x86 code, a kvm_xen_guest_time_update() helper has also been
      added; using it requires that kvm_setup_guest_pvclock() cease
      to be a static function.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: x86@kernel.org
---
 arch/x86/include/asm/kvm_host.h |  4 +++
 arch/x86/kvm/x86.c              | 12 +++-----
 arch/x86/kvm/xen.c              | 50 ++++++++++++++++++++++++++-------
 arch/x86/kvm/xen.h              |  6 +++-
 4 files changed, 53 insertions(+), 19 deletions(-)

Comments

David Woodhouse Sept. 14, 2023, 1:17 p.m. UTC | #1
On Thu, 2023-09-14 at 08:49 +0000, Paul Durrant wrote:
> From: Paul Durrant <pdurrant@amazon.com>
> 
> The shared_info page contains an array of 32 vcpu_info structures
> which may be used by guests (with less than 32 vCPUs) if they don't
                                     ^ fewer

> explicitly register vcpu_info structures in their own memory, using
> a VCPUOP_register_vcpu_info hypercall.
> Currently we rely on the VMM always registering vcpu_info structures,
> even if the guest doesn't make that hypercall, which is somewhat
> bogus as (as has been stated in the comment of a previous commit)
> the shared_info page is not guest memory.
> Prepare to automatically use the vcpu_info info embedded in shared_info
> by default, by adding a get_vcpu_info_cache() helper function. This
> function also passes back an offset to be added to the cached khva.
> This is currently always zero since we're still relying on the
> current VMM behaviour. A subsequent patch will make proper use of
> it.
> 
> NOTE: To avoid leaking detail of the vcpu_info duality into the main
>       x86 code, a kvm_xen_guest_time_update() has also been added
>       and use of this requires that kvm_setup_guest_pvclock() ceases
>       to be a static function.

No functional change intended...?


> Signed-off-by: Paul Durrant <pdurrant@amazon.com>

Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>

kernel test robot Sept. 14, 2023, 8:10 p.m. UTC | #2
Hi Paul,

kernel test robot noticed the following build warnings:

[auto build test WARNING on kvm/queue]
[also build test WARNING on mst-vhost/linux-next]
[cannot apply to kvm/linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Paul-Durrant/KVM-pfncache-add-a-map-helper-function/20230914-171017
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
patch link:    https://lore.kernel.org/r/20230914084946.200043-8-paul%40xen.org
patch subject: [PATCH 7/8] KVM: xen: prepare for using 'default' vcpu_info
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20230915/202309150313.w8vtnM1C-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230915/202309150313.w8vtnM1C-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309150313.w8vtnM1C-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> arch/x86/kvm/xen.c:492:26: warning: no previous prototype for 'get_vcpu_info_cache' [-Wmissing-prototypes]
     492 | struct gfn_to_pfn_cache *get_vcpu_info_cache(struct kvm_vcpu *v, unsigned long *offset)
         |                          ^~~~~~~~~~~~~~~~~~~


vim +/get_vcpu_info_cache +492 arch/x86/kvm/xen.c

   491	
 > 492	struct gfn_to_pfn_cache *get_vcpu_info_cache(struct kvm_vcpu *v, unsigned long *offset)
   493	{
   494		if (offset)
   495			*offset = 0;
   496	
   497		return &v->arch.xen.vcpu_info_cache;
   498	}
   499
kernel test robot Sept. 18, 2023, 9:25 a.m. UTC | #3
Hi Paul,

kernel test robot noticed the following build warnings:

[auto build test WARNING on kvm/queue]
[also build test WARNING on mst-vhost/linux-next linus/master v6.6-rc2 next-20230918]
[cannot apply to kvm/linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Paul-Durrant/KVM-pfncache-add-a-map-helper-function/20230914-171017
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
patch link:    https://lore.kernel.org/r/20230914084946.200043-8-paul%40xen.org
patch subject: [PATCH 7/8] KVM: xen: prepare for using 'default' vcpu_info
config: i386-randconfig-063-20230918 (https://download.01.org/0day-ci/archive/20230918/202309181725.cpTHcjo6-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230918/202309181725.cpTHcjo6-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309181725.cpTHcjo6-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
>> arch/x86/kvm/xen.c:492:25: sparse: sparse: symbol 'get_vcpu_info_cache' was not declared. Should it be static?
   arch/x86/kvm/xen.c:441:9: sparse: sparse: context imbalance in 'kvm_xen_update_runstate_guest' - unexpected unlock
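
Both robot reports point at the same issue: get_vcpu_info_cache() is
defined without a previous prototype and has no callers outside
xen.c. Assuming no later patch in the series needs to export it, a
minimal fix would be to give the helper internal linkage:

-struct gfn_to_pfn_cache *get_vcpu_info_cache(struct kvm_vcpu *v, unsigned long *offset)
+static struct gfn_to_pfn_cache *get_vcpu_info_cache(struct kvm_vcpu *v,
+						    unsigned long *offset)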

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1a4def36d5bb..6d896f9161c2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2238,4 +2238,8 @@  int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
  */
 #define KVM_EXIT_HYPERCALL_MBZ		GENMASK_ULL(31, 1)
 
+void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
+			     struct gfn_to_pfn_cache *gpc,
+			     unsigned int offset);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0df06f47801c..4cd577d01bc4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3094,9 +3094,9 @@  u64 get_kvmclock_ns(struct kvm *kvm)
 	return data.clock;
 }
 
-static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
-				    struct gfn_to_pfn_cache *gpc,
-				    unsigned int offset)
+void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
+			     struct gfn_to_pfn_cache *gpc,
+			     unsigned int offset)
 {
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct pvclock_vcpu_time_info *guest_hv_clock;
@@ -3232,11 +3232,7 @@  static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	if (vcpu->pv_time.active)
 		kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
-	if (vcpu->xen.vcpu_info_cache.active)
-		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
-					offsetof(struct compat_vcpu_info, time));
-	if (vcpu->xen.vcpu_time_info_cache.active)
-		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
+	kvm_xen_guest_time_update(v);
 	kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 	return 0;
 }
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 1abb4547642a..892563fea40f 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -489,6 +489,29 @@  static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
 	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
 }
 
+struct gfn_to_pfn_cache *get_vcpu_info_cache(struct kvm_vcpu *v, unsigned long *offset)
+{
+	if (offset)
+		*offset = 0;
+
+	return &v->arch.xen.vcpu_info_cache;
+}
+
+void kvm_xen_guest_time_update(struct kvm_vcpu *v)
+{
+	unsigned long offset;
+	struct gfn_to_pfn_cache *gpc = get_vcpu_info_cache(v, &offset);
+
+	BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
+		     offsetof(struct compat_vcpu_info, time));
+
+	if (gpc->active)
+		kvm_setup_guest_pvclock(v, gpc, offset + offsetof(struct compat_vcpu_info, time));
+
+	if (v->arch.xen.vcpu_time_info_cache.active)
+		kvm_setup_guest_pvclock(v, &v->arch.xen.vcpu_time_info_cache, 0);
+}
+
 /*
  * On event channel delivery, the vcpu_info may not have been accessible.
  * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
@@ -499,7 +522,8 @@  static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
 void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 {
 	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
-	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
+	unsigned long offset;
+	struct gfn_to_pfn_cache *gpc = get_vcpu_info_cache(v, &offset);
 	unsigned long flags;
 
 	if (!evtchn_pending_sel)
@@ -522,7 +546,7 @@  void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 
 	/* Now gpc->khva is a valid kernel address for the vcpu_info */
 	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
-		struct vcpu_info *vi = gpc->khva;
+		struct vcpu_info *vi = gpc->khva + offset;
 
 		asm volatile(LOCK_PREFIX "orq %0, %1\n"
 			     "notq %0\n"
@@ -534,7 +558,7 @@  void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
 	} else {
 		u32 evtchn_pending_sel32 = evtchn_pending_sel;
-		struct compat_vcpu_info *vi = gpc->khva;
+		struct compat_vcpu_info *vi = gpc->khva + offset;
 
 		asm volatile(LOCK_PREFIX "orl %0, %1\n"
 			     "notl %0\n"
@@ -556,7 +580,8 @@  void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 {
-	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
+	unsigned long offset;
+	struct gfn_to_pfn_cache *gpc = get_vcpu_info_cache(v, &offset);
 	unsigned long flags;
 	u8 rc = 0;
 
@@ -598,7 +623,7 @@  int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		read_lock_irqsave(&gpc->lock, flags);
 	}
 
-	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
+	rc = ((struct vcpu_info *)(gpc->khva + offset))->evtchn_upcall_pending;
 	read_unlock_irqrestore(&gpc->lock, flags);
 	return rc;
 }
@@ -1567,7 +1592,7 @@  static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
  */
 int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 {
-	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
+	struct gfn_to_pfn_cache *gpc;
 	struct kvm_vcpu *vcpu;
 	unsigned long *pending_bits, *mask_bits;
 	unsigned long flags;
@@ -1585,7 +1610,8 @@  int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
 	}
 
-	if (!vcpu->arch.xen.vcpu_info_cache.active)
+	gpc = get_vcpu_info_cache(vcpu, NULL);
+	if (!gpc->active)
 		return -EINVAL;
 
 	if (xe->port >= max_evtchn_port(kvm))
@@ -1594,6 +1620,7 @@  int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	rc = -EWOULDBLOCK;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	gpc = &kvm->arch.xen.shinfo_cache;
 
 	read_lock_irqsave(&gpc->lock, flags);
 	if (!kvm_gpc_check(gpc, PAGE_SIZE))
@@ -1624,10 +1651,13 @@  int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 		rc = -ENOTCONN; /* Masked */
 		kvm_xen_check_poller(vcpu, xe->port);
 	} else {
+		unsigned long offset;
+
 		rc = 1; /* Delivered to the bitmap in shared_info. */
+
 		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
 		read_unlock_irqrestore(&gpc->lock, flags);
-		gpc = &vcpu->arch.xen.vcpu_info_cache;
+		gpc = get_vcpu_info_cache(vcpu, &offset);
 
 		read_lock_irqsave(&gpc->lock, flags);
 		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
@@ -1641,13 +1671,13 @@  int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 		}
 
 		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
-			struct vcpu_info *vcpu_info = gpc->khva;
+			struct vcpu_info *vcpu_info = gpc->khva + offset;
 			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
 				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
 				kick_vcpu = true;
 			}
 		} else {
-			struct compat_vcpu_info *vcpu_info = gpc->khva;
+			struct compat_vcpu_info *vcpu_info = gpc->khva + offset;
 			if (!test_and_set_bit(port_word_bit,
 					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
 				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index f8f1fe22d090..c4d29ccbc3ab 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -16,6 +16,7 @@ 
 
 extern struct static_key_false_deferred kvm_xen_enabled;
 
+void kvm_xen_guest_time_update(struct kvm_vcpu *vcpu);
 int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
 void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
 int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
@@ -52,7 +53,6 @@  static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
 static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
-	    vcpu->arch.xen.vcpu_info_cache.active &&
 	    vcpu->kvm->arch.xen.upcall_vector)
 		return __kvm_xen_has_interrupt(vcpu);
 
@@ -80,6 +80,10 @@  static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
 
 void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
 #else
+static inline void kvm_xen_guest_time_update(struct kvm_vcpu *vcpu)
+{
+}
+
 static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 {
 	return 1;