[2/2] s390: Convert vsie code to use page->private

Message ID 20241219162252.1025317-3-willy@infradead.org (mailing list archive)
State New
Series s390: Remove uses of page->index

Commit Message

Matthew Wilcox (Oracle) Dec. 19, 2024, 4:22 p.m. UTC
The vsie pages are not standard page tables, so do not convert them to
use ptdesc.  However, page->index is going away so use page->private to
store the address rather than page->index.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/s390/kvm/vsie.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
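
(Note: page->private is a plain unsigned long, and the kernel already provides
the page_private()/set_page_private() accessors in include/linux/mm.h, so the
two converted sites could equally be spelled through those helpers. Shown only
as an illustration of the same change; the patch below keeps the direct field
accesses:)

	/* equivalent spelling via the existing struct page accessors */
	radix_tree_delete(&kvm->arch.vsie.addr_to_page, page_private(page) >> 9);
	set_page_private(page, addr);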

Comments

David Hildenbrand Dec. 20, 2024, 9:55 a.m. UTC | #1
On 19.12.24 17:22, Matthew Wilcox (Oracle) wrote:
> The vsie pages are not standard page tables, so do not convert them to
> use ptdesc.  

They are not page tables "at all" :)

A vsie_page is used when emulating hardware virtualization (SIE --
Start Interpretive Execution) for a CPU: vSIE -- virtual SIE.

These pages primarily hold the shadowed "SIE control block" (SCB) that tells
the hardware what to do when entering hardware virtualization (SIE), and
include things like CPU state such as registers. We store some other
information in the same page.

We have to shadow the SCB provided by the VM VCPU when running the nested VM
VCPU. The SCB resides in guest physical memory. So, similar to shadowing of
page tables, we have to detect modifications to the SCB so we can update our
shadowed version accordingly.

We grab a page and fill in a vsie page when a VCPU wants to execute the SIE
instruction (executing the nested VM VCPU), and return it to the pool when we
are done emulating the SIE instruction. We try to reuse the same vsie pages
over various runs; that's why we store the address of the last SCB we
shadowed, so we can look it up again. (This improves HW performance.)
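
In condensed code, that reuse pattern looks roughly like the sketch below.
This is not the literal get_vsie_page() -- locking, reference counting, the
pool-size limit and error handling are all elided -- it is only meant to show
how the stored SCB address keys the addr_to_page radix tree:

/*
 * Condensed sketch only: how the last shadowed SCB address keys the
 * addr_to_page radix tree. Locking, refcounting, the pool-size limit
 * and error handling are left out on purpose.
 */
static struct vsie_page *vsie_page_lookup_sketch(struct kvm *kvm,
						 unsigned long addr)
{
	struct page *page;

	/* Did we already shadow the SCB at this guest physical address? */
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	if (page)
		return page_to_virt(page);	/* warm reuse */

	/*
	 * Otherwise recycle an idle page from the per-VM pool: unhash it
	 * from the SCB address it shadowed last time (currently kept in
	 * page->index) and rehash it under the new address.
	 */
	page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
	radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	page->index = addr;
	radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page);
	return page_to_virt(page);
}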


So page->index will hold the "guest physical address of the SCB we shadowed
the last time this vsie page was used".

We seem to have space in the vsie page, so I think we can avoid messing
with 'struct page' fields completely!

 From c94e4ecd6ee791ef9cda1c0577a1e765e5ce2867 Mon Sep 17 00:00:00 2001
From: David Hildenbrand <david@redhat.com>
Date: Fri, 20 Dec 2024 10:53:46 +0100
Subject: [PATCH] tmp

Signed-off-by: David Hildenbrand <david@redhat.com>
---
  arch/s390/kvm/vsie.c | 19 ++++++++++++++-----
  1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 150b9387860ad..0a8cffe9b80bf 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -46,7 +46,13 @@ struct vsie_page {
  	gpa_t gvrd_gpa;				/* 0x0240 */
  	gpa_t riccbd_gpa;			/* 0x0248 */
  	gpa_t sdnx_gpa;				/* 0x0250 */
-	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
+	/*
+	 * guest address of the original SCB. Remains set for free vsie
+	 * pages, so we can properly look them up in our addr_to_page
+	 * radix tree.
+	 */
+	gpa_t scb_gpa;				/* 0x0258 */
+	__u8 reserved[0x0700 - 0x0260];		/* 0x0260 */
  	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
  	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
  };
@@ -1383,6 +1389,7 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
  		page_ref_inc(page);
  		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
  		kvm->arch.vsie.page_count++;
+		vsie_page = page_to_virt(page);
  	} else {
  		/* reuse an existing entry that belongs to nobody */
  		while (true) {
@@ -1393,9 +1400,11 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
  			kvm->arch.vsie.next++;
  			kvm->arch.vsie.next %= nr_vcpus;
  		}
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+		vsie_page = page_to_virt(page);
+		radix_tree_delete(&kvm->arch.vsie.addr_to_page,
+				  vsie_page->scb_gpa >> 9);
  	}
-	page->index = addr;
+	vsie_page->scb_gpa = addr;
  	/* double use of the same address */
  	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
  		page_ref_dec(page);
@@ -1404,7 +1413,6 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
  	}
  	mutex_unlock(&kvm->arch.vsie.mutex);
  
-	vsie_page = page_to_virt(page);
  	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
  	release_gmap_shadow(vsie_page);
  	vsie_page->fault_addr = 0;
@@ -1496,7 +1504,8 @@ void kvm_s390_vsie_destroy(struct kvm *kvm)
  		vsie_page = page_to_virt(page);
  		release_gmap_shadow(vsie_page);
  		/* free the radix tree entry */
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+		radix_tree_delete(&kvm->arch.vsie.addr_to_page,
+				  vsie_page->scb_gpa >> 9);
  		__free_page(page);
  	}
  	kvm->arch.vsie.page_count = 0;
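
Since the layout relies on hand-maintained offset comments, a compile-time
check could guard the new field -- hypothetical, not part of the patch above:

	/* hypothetical guards for the hand-maintained struct layout */
	static_assert(offsetof(struct vsie_page, scb_gpa) == 0x0258);
	static_assert(offsetof(struct vsie_page, crycb) == 0x0700);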
Claudio Imbrenda Dec. 20, 2024, 11:40 a.m. UTC | #2
On Fri, 20 Dec 2024 10:55:44 +0100
David Hildenbrand <david@redhat.com> wrote:

> On 19.12.24 17:22, Matthew Wilcox (Oracle) wrote:
> > The vsie pages are not standard page tables, so do not convert them to
> > use ptdesc.    
> 
> They are not page tables "at all" :)
> 
> A vsie_page is used when emulating hardware virtualization (SIE --
> Start Interpretive Execution) for a CPU: vSIE -- virtual SIE.
> 
> These pages primarily hold the shadowed "SIE control block" (SCB) that tells
> the hardware what to do when entering hardware virtualization (SIE), and
> include things like CPU state such as registers. We store some other
> information in the same page.
> 
> We have to shadow the SCB provided by the VM VCPU when running the nested VM
> VCPU. The SCB resides in guest physical memory. So, similar to shadowing of
> page tables, we have to detect modifications to the SCB so we can update our
> shadowed version accordingly.
> 
> We grab a page and fill in a vsie page when a VCPU wants to execute the SIE
> instruction (executing the nested VM VCPU), and return it to the pool when we
> are done emulating the SIE instruction. We try to reuse the same vsie pages
> over various runs; that's why we store the address of the last SCB we
> shadowed, so we can look it up again. (This improves HW performance.)
> 
> 
> So page->index will hold the "guest physical address of the SCB we shadowed
> the last time this vsie page was used".
> 
> We seem to have space in the vsie page, so I think we can avoid messing
> with 'struct page' fields completely!

I really like this.


Patch

diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 150b9387860a..26cbd69eb06d 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1393,9 +1393,9 @@  static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 			kvm->arch.vsie.next++;
 			kvm->arch.vsie.next %= nr_vcpus;
 		}
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->private >> 9);
 	}
-	page->index = addr;
+	page->private = addr;
 	/* double use of the same address */
 	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
 		page_ref_dec(page);
@@ -1496,7 +1496,7 @@  void kvm_s390_vsie_destroy(struct kvm *kvm)
 		vsie_page = page_to_virt(page);
 		release_gmap_shadow(vsie_page);
 		/* free the radix tree entry */
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->private >> 9);
 		__free_page(page);
 	}
 	kvm->arch.vsie.page_count = 0;