
[v3,08/15] KVM: arm64: pkvm: Refcount the pages shared with EL2

Message ID 20211201170411.1561936-9-qperret@google.com (mailing list archive)
State New, archived
Series KVM: arm64: Introduce kvm_share_hyp()

Commit Message

Quentin Perret Dec. 1, 2021, 5:04 p.m. UTC
In order to simplify the page tracking infrastructure at EL2 in nVHE
protected mode, move the responsibility for refcounting pages that are
shared multiple times to the host. To do so, create a red-black tree
tracking all the PFNs that have been shared, along with a refcount.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/kvm/mmu.c | 78 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 68 insertions(+), 10 deletions(-)

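As an illustration of the behaviour described in the commit message, the
sketch below shows a hypothetical caller sharing two objects that happen to
live in the same page. The helper and 'struct my_obj' are invented for
illustration and are not part of the patch; only kvm_share_hyp() and the
refcount semantics come from the series.

/*
 * Hypothetical illustration only: 'struct my_obj' and this helper are not
 * part of the patch. If both objects live in the same page, only the first
 * kvm_share_hyp() call issues the __pkvm_host_share_hyp hypercall; the
 * second finds the PFN already in the rb-tree and simply bumps its count.
 */
static int share_two_objects(struct my_obj *a, struct my_obj *b)
{
	int ret;

	ret = kvm_share_hyp(a, a + 1);	/* PFN not tracked yet: hypercall, count = 1 */
	if (ret)
		return ret;

	return kvm_share_hyp(b, b + 1);	/* same PFN: no hypercall, count = 2 */
}
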
Comments

Will Deacon Dec. 9, 2021, 11:16 a.m. UTC | #1
On Wed, Dec 01, 2021 at 05:04:02PM +0000, Quentin Perret wrote:
> In order to simplify the page tracking infrastructure at EL2 in nVHE
> protected mode, move the responsibility for refcounting pages that are
> shared multiple times to the host. To do so, create a red-black tree
> tracking all the PFNs that have been shared, along with a refcount.
> 
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
>  arch/arm64/kvm/mmu.c | 78 ++++++++++++++++++++++++++++++++++++++------
>  1 file changed, 68 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index fd868fb9d922..d72566896755 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -284,23 +284,72 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
>  	}
>  }
>  
> -static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
> +struct hyp_shared_pfn {
> +	u64 pfn;
> +	int count;
> +	struct rb_node node;
> +};
> +
> +static DEFINE_MUTEX(hyp_shared_pfns_lock);
> +static struct rb_root hyp_shared_pfns = RB_ROOT;
> +
> +static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
> +					      struct rb_node **parent)
>  {
> -	phys_addr_t addr;
> -	int ret;
> +	struct hyp_shared_pfn *this;
> +
> +	*node = &hyp_shared_pfns.rb_node;
> +	*parent = NULL;
> +	while (**node) {
> +		this = container_of(**node, struct hyp_shared_pfn, node);
> +		*parent = **node;
> +		if (this->pfn < pfn)
> +			*node = &((**node)->rb_left);
> +		else if (this->pfn > pfn)
> +			*node = &((**node)->rb_right);
> +		else
> +			return this;
> +	}
>  
> -	for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
> -		ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
> -					__phys_to_pfn(addr));
> -		if (ret)
> -			return ret;
> +	return NULL;
> +}
> +
> +static int share_pfn_hyp(u64 pfn)
> +{
> +	struct rb_node **node, *parent;
> +	struct hyp_shared_pfn *this;
> +	int ret = 0;
> +
> +	mutex_lock(&hyp_shared_pfns_lock);
> +	this = find_shared_pfn(pfn, &node, &parent);

I don't think this is a fast-path at the moment, but in the future we might
consider using RCU to do the lookup outside of the mutex.

But as-is:

Acked-by: Will Deacon <will@kernel.org>

Will
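
To flesh out Will's suggestion, here is a minimal sketch of what taking the
lookup out of the mutex could look like. It is not part of the series: it
assumes the shared PFNs are indexed in an XArray (whose lookups are RCU-safe)
rather than the rb-tree, turns the per-PFN count into a refcount_t, and
assumes the unshare path removes entries under the mutex and frees them with
kfree_rcu().

struct hyp_shared_pfn {
	u64 pfn;
	refcount_t count;
};

/* Hypothetical alternative to the rb-tree: XArray lookups are RCU-safe. */
static DEFINE_XARRAY(hyp_shared_pfns);
static DEFINE_MUTEX(hyp_shared_pfns_lock);

static int share_pfn_hyp(u64 pfn)
{
	struct hyp_shared_pfn *this;
	int ret = 0;

	/*
	 * Fast path, no mutex: if the PFN is already shared, just bump its
	 * refcount. The RCU read lock keeps 'this' alive against a racing
	 * unshare, which is assumed to free entries with kfree_rcu().
	 */
	rcu_read_lock();
	this = xa_load(&hyp_shared_pfns, pfn);
	if (this && refcount_inc_not_zero(&this->count)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	/* Slow path: first share of this PFN, serialised by the mutex. */
	mutex_lock(&hyp_shared_pfns_lock);
	this = xa_load(&hyp_shared_pfns, pfn);
	if (this) {
		refcount_inc(&this->count);
		goto unlock;
	}

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this) {
		ret = -ENOMEM;
		goto unlock;
	}

	this->pfn = pfn;
	refcount_set(&this->count, 1);
	ret = xa_err(xa_store(&hyp_shared_pfns, pfn, this, GFP_KERNEL));
	if (ret) {
		kfree(this);
		goto unlock;
	}

	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
	mutex_unlock(&hyp_shared_pfns_lock);

	return ret;
}

As in the patch, the hypercall failure path deliberately leaves the entry in
place; whether that is the right policy is a separate question from the
locking scheme sketched here.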

Patch

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index fd868fb9d922..d72566896755 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -284,23 +284,72 @@  static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
 	}
 }
 
-static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
+struct hyp_shared_pfn {
+	u64 pfn;
+	int count;
+	struct rb_node node;
+};
+
+static DEFINE_MUTEX(hyp_shared_pfns_lock);
+static struct rb_root hyp_shared_pfns = RB_ROOT;
+
+static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
+					      struct rb_node **parent)
 {
-	phys_addr_t addr;
-	int ret;
+	struct hyp_shared_pfn *this;
+
+	*node = &hyp_shared_pfns.rb_node;
+	*parent = NULL;
+	while (**node) {
+		this = container_of(**node, struct hyp_shared_pfn, node);
+		*parent = **node;
+		if (this->pfn < pfn)
+			*node = &((**node)->rb_left);
+		else if (this->pfn > pfn)
+			*node = &((**node)->rb_right);
+		else
+			return this;
+	}
 
-	for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
-		ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
-					__phys_to_pfn(addr));
-		if (ret)
-			return ret;
+	return NULL;
+}
+
+static int share_pfn_hyp(u64 pfn)
+{
+	struct rb_node **node, *parent;
+	struct hyp_shared_pfn *this;
+	int ret = 0;
+
+	mutex_lock(&hyp_shared_pfns_lock);
+	this = find_shared_pfn(pfn, &node, &parent);
+	if (this) {
+		this->count++;
+		goto unlock;
 	}
 
-	return 0;
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	if (!this) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	this->pfn = pfn;
+	this->count = 1;
+	rb_link_node(&this->node, parent, node);
+	rb_insert_color(&this->node, &hyp_shared_pfns);
+	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
+unlock:
+	mutex_unlock(&hyp_shared_pfns_lock);
+
+	return ret;
 }
 
 int kvm_share_hyp(void *from, void *to)
 {
+	phys_addr_t start, end, cur;
+	u64 pfn;
+	int ret;
+
 	if (is_kernel_in_hyp_mode())
 		return 0;
 
@@ -315,7 +364,16 @@  int kvm_share_hyp(void *from, void *to)
 	if (kvm_host_owns_hyp_mappings())
 		return create_hyp_mappings(from, to, PAGE_HYP);
 
-	return pkvm_share_hyp(__pa(from), __pa(to));
+	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+	end = PAGE_ALIGN(__pa(to));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		pfn = __phys_to_pfn(cur);
+		ret = share_pfn_hyp(pfn);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 /**