
[5/7] KVM: arm64: Remove hyp_pool pointer from struct hyp_page

Message ID: 20210527125134.2116404-6-qperret@google.com
State: New, archived
Series: KVM: arm64: Reduce hyp_vmemmap overhead

Commit Message

Quentin Perret May 27, 2021, 12:51 p.m. UTC
Each struct hyp_page currently contains a pointer to a hyp_pool struct
where the page should be freed if its refcount reaches 0. However, this
information can always be inferred from the context in the EL2 code, so
drop the pointer to save a few bytes in the vmemmap.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/kvm/hyp/include/nvhe/gfp.h    |  4 ++--
 arch/arm64/kvm/hyp/include/nvhe/memory.h |  2 --
 arch/arm64/kvm/hyp/nvhe/mem_protect.c    | 13 +++++++++++--
 arch/arm64/kvm/hyp/nvhe/page_alloc.c     |  7 ++-----
 arch/arm64/kvm/hyp/nvhe/setup.c          | 14 ++++++++++++--
 5 files changed, 27 insertions(+), 13 deletions(-)
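
For context, the struct change boils down to the following before/after. The byte counts are illustrative only: they assume 64-bit pointers, 4-byte ints and default alignment, and ignore any future padding.

/* Before: 4 (refcount) + 4 (order) + 8 (pool pointer) = 16 bytes per page */
struct hyp_page {
	unsigned int refcount;
	unsigned int order;
	struct hyp_pool *pool;
};

/*
 * After: 4 + 4 = 8 bytes per page, halving the hyp_vmemmap footprint,
 * since the vmemmap scales as nr_pages * sizeof(struct hyp_page).
 */
struct hyp_page {
	unsigned int refcount;
	unsigned int order;
};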

Comments

Marc Zyngier June 1, 2021, 3 p.m. UTC | #1
On Thu, 27 May 2021 13:51:32 +0100,
Quentin Perret <qperret@google.com> wrote:
> 
> Each struct hyp_page currently contains a pointer to a hyp_pool struct
> where the page should be freed if its refcount reaches 0. However, this
> information can always be inferred from the context in the EL2 code, so
> drop the pointer to save a few bytes in the vmemmap.
> 
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
>  arch/arm64/kvm/hyp/include/nvhe/gfp.h    |  4 ++--
>  arch/arm64/kvm/hyp/include/nvhe/memory.h |  2 --
>  arch/arm64/kvm/hyp/nvhe/mem_protect.c    | 13 +++++++++++--
>  arch/arm64/kvm/hyp/nvhe/page_alloc.c     |  7 ++-----
>  arch/arm64/kvm/hyp/nvhe/setup.c          | 14 ++++++++++++--
>  5 files changed, 27 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> index aada4d97de49..9ed374648364 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> @@ -42,8 +42,8 @@ static inline void hyp_set_page_refcounted(struct hyp_page *p)
>  
>  /* Allocation */
>  void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
> -void hyp_get_page(void *addr);
> -void hyp_put_page(void *addr);
> +void hyp_get_page(void *addr, struct hyp_pool *pool);
> +void hyp_put_page(void *addr, struct hyp_pool *pool);

It'd be good to be consistent with __hyp_put_page(), which has these
arguments in the opposite order. See below for an example.
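
For reference, __hyp_put_page() in page_alloc.c already takes the pool first, so the suggestion amounts to something like this (a sketch of the suggested prototypes, not the interface as posted):

/* Existing internal helper: container (pool) first, element second */
static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p);

/* Suggested reordering of the new public helpers to match */
void hyp_get_page(struct hyp_pool *pool, void *addr);
void hyp_put_page(struct hyp_pool *pool, void *addr);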

>  
>  /* Used pages cannot be freed */
>  int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> index 7691ab495eb4..991636be2f46 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> @@ -7,11 +7,9 @@
>  
>  #include <linux/types.h>
>  
> -struct hyp_pool;
>  struct hyp_page {
>  	unsigned int refcount;
>  	unsigned int order;
> -	struct hyp_pool *pool;
>  };
>  
>  extern u64 __hyp_vmemmap;
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index fdd5b5702e8a..3603311eb41c 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -42,6 +42,15 @@ static void *host_s2_zalloc_page(void *pool)
>  {
>  	return hyp_alloc_pages(pool, 0);
>  }
> +static void host_s2_get_page(void *addr)

nit: missing blank line.
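
That is, the new wrapper should be separated from host_s2_zalloc_page() by a blank line, as the rest of the file does:

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(addr, &host_s2_pool);
}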

> +{
> +	hyp_get_page(addr, &host_s2_pool);
> +}
> +
> +static void host_s2_put_page(void *addr)
> +{
> +	hyp_put_page(addr, &host_s2_pool);
> +}
>  
>  static int prepare_s2_pool(void *pgt_pool_base)
>  {
> @@ -60,8 +69,8 @@ static int prepare_s2_pool(void *pgt_pool_base)
>  		.phys_to_virt = hyp_phys_to_virt,
>  		.virt_to_phys = hyp_virt_to_phys,
>  		.page_count = hyp_page_count,
> -		.get_page = hyp_get_page,
> -		.put_page = hyp_put_page,
> +		.get_page = host_s2_get_page,
> +		.put_page = host_s2_put_page,
>  	};
>  
>  	return 0;
> diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> index ce7379f1480b..e453108a2d95 100644
> --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> @@ -150,20 +150,18 @@ static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
>  		__hyp_attach_page(pool, p);
>  }
>  
> -void hyp_put_page(void *addr)
> +void hyp_put_page(void *addr, struct hyp_pool *pool)
>  {
>  	struct hyp_page *p = hyp_virt_to_page(addr);
> -	struct hyp_pool *pool = hyp_page_to_pool(p);
>  
>  	hyp_spin_lock(&pool->lock);
>  	__hyp_put_page(pool, p);

When I see this, my eyes get crossed, and I don't know what I'm
reading anymore! ;-) In general, I like the "container" as a first
argument, followed by the element that can be contained.
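
Concretely, with the container-first ordering suggested above, the helper and its per-pool wrapper might read as follows (a sketch of the suggested shape, not the version posted in this patch):

void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	__hyp_put_page(pool, p);	/* container-first at both levels */
	hyp_spin_unlock(&pool->lock);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}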

>  	hyp_spin_unlock(&pool->lock);
>  }
>  
> -void hyp_get_page(void *addr)
> +void hyp_get_page(void *addr, struct hyp_pool *pool)
>  {
>  	struct hyp_page *p = hyp_virt_to_page(addr);
> -	struct hyp_pool *pool = hyp_page_to_pool(p);
>  
>  	hyp_spin_lock(&pool->lock);
>  	hyp_page_ref_inc(p);
> @@ -212,7 +210,6 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
>  	/* Init the vmemmap portion */
>  	p = hyp_phys_to_page(phys);
>  	for (i = 0; i < nr_pages; i++) {
> -		p[i].pool = pool;
>  		p[i].order = 0;
>  		hyp_set_page_refcounted(&p[i]);
>  	}
> diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
> index 709cb3d19eb7..bf61abd4a330 100644
> --- a/arch/arm64/kvm/hyp/nvhe/setup.c
> +++ b/arch/arm64/kvm/hyp/nvhe/setup.c
> @@ -137,6 +137,16 @@ static void *hyp_zalloc_hyp_page(void *arg)
>  	return hyp_alloc_pages(&hpool, 0);
>  }
>  
> +static void hpool_get_page(void *addr)
> +{
> +	hyp_get_page(addr, &hpool);
> +}
> +
> +static void hpool_put_page(void *addr)
> +{
> +	hyp_put_page(addr, &hpool);
> +}
> +
>  void __noreturn __pkvm_init_finalise(void)
>  {
>  	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
> @@ -160,8 +170,8 @@ void __noreturn __pkvm_init_finalise(void)
>  		.zalloc_page = hyp_zalloc_hyp_page,
>  		.phys_to_virt = hyp_phys_to_virt,
>  		.virt_to_phys = hyp_virt_to_phys,
> -		.get_page = hyp_get_page,
> -		.put_page = hyp_put_page,
> +		.get_page = hpool_get_page,
> +		.put_page = hpool_put_page,
>  	};
>  	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
>  
> -- 
> 2.31.1.818.g46aad6cb9e-goog
> 
> 

Thanks,

	M.

Patch

diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index aada4d97de49..9ed374648364 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -42,8 +42,8 @@ static inline void hyp_set_page_refcounted(struct hyp_page *p)
 
 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
-void hyp_get_page(void *addr);
-void hyp_put_page(void *addr);
+void hyp_get_page(void *addr, struct hyp_pool *pool);
+void hyp_put_page(void *addr, struct hyp_pool *pool);
 
 /* Used pages cannot be freed */
 int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index 7691ab495eb4..991636be2f46 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -7,11 +7,9 @@
 
 #include <linux/types.h>
 
-struct hyp_pool;
 struct hyp_page {
 	unsigned int refcount;
 	unsigned int order;
-	struct hyp_pool *pool;
 };
 
 extern u64 __hyp_vmemmap;
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index fdd5b5702e8a..3603311eb41c 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -42,6 +42,15 @@ static void *host_s2_zalloc_page(void *pool)
 {
 	return hyp_alloc_pages(pool, 0);
 }
+static void host_s2_get_page(void *addr)
+{
+	hyp_get_page(addr, &host_s2_pool);
+}
+
+static void host_s2_put_page(void *addr)
+{
+	hyp_put_page(addr, &host_s2_pool);
+}
 
 static int prepare_s2_pool(void *pgt_pool_base)
 {
@@ -60,8 +69,8 @@ static int prepare_s2_pool(void *pgt_pool_base)
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
 		.page_count = hyp_page_count,
-		.get_page = hyp_get_page,
-		.put_page = hyp_put_page,
+		.get_page = host_s2_get_page,
+		.put_page = host_s2_put_page,
 	};
 
 	return 0;
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index ce7379f1480b..e453108a2d95 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -150,20 +150,18 @@ static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
 		__hyp_attach_page(pool, p);
 }
 
-void hyp_put_page(void *addr)
+void hyp_put_page(void *addr, struct hyp_pool *pool)
 {
 	struct hyp_page *p = hyp_virt_to_page(addr);
-	struct hyp_pool *pool = hyp_page_to_pool(p);
 
 	hyp_spin_lock(&pool->lock);
 	__hyp_put_page(pool, p);
 	hyp_spin_unlock(&pool->lock);
 }
 
-void hyp_get_page(void *addr)
+void hyp_get_page(void *addr, struct hyp_pool *pool)
 {
 	struct hyp_page *p = hyp_virt_to_page(addr);
-	struct hyp_pool *pool = hyp_page_to_pool(p);
 
 	hyp_spin_lock(&pool->lock);
 	hyp_page_ref_inc(p);
@@ -212,7 +210,6 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
 	/* Init the vmemmap portion */
 	p = hyp_phys_to_page(phys);
 	for (i = 0; i < nr_pages; i++) {
-		p[i].pool = pool;
 		p[i].order = 0;
 		hyp_set_page_refcounted(&p[i]);
 	}
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 709cb3d19eb7..bf61abd4a330 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -137,6 +137,16 @@ static void *hyp_zalloc_hyp_page(void *arg)
 	return hyp_alloc_pages(&hpool, 0);
 }
 
+static void hpool_get_page(void *addr)
+{
+	hyp_get_page(addr, &hpool);
+}
+
+static void hpool_put_page(void *addr)
+{
+	hyp_put_page(addr, &hpool);
+}
+
 void __noreturn __pkvm_init_finalise(void)
 {
 	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@@ -160,8 +170,8 @@ void __noreturn __pkvm_init_finalise(void)
 		.zalloc_page = hyp_zalloc_hyp_page,
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
-		.get_page = hyp_get_page,
-		.put_page = hyp_put_page,
+		.get_page = hpool_get_page,
+		.put_page = hpool_put_page,
 	};
 	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;