Message ID | 20211201170411.1561936-11-qperret@google.com (mailing list archive)
State      | New, archived
Series     | KVM: arm64: Introduce kvm_share_hyp()
On Wed, 1 Dec 2021 at 17:04, 'Quentin Perret' via kernel-team
<kernel-team@android.com> wrote:
>
> From: Will Deacon <will@kernel.org>
>
> In preparation for adding additional locked sections for manipulating
> page-tables at EL2, introduce some simple wrappers around the host and
> hypervisor locks so that it's a bit easier to read and a bit more
> difficult to take the wrong lock (or even take them in the wrong order).

Looks good, but how does this help prevent taking locks in the wrong order?

> Signed-off-by: Will Deacon <will@kernel.org>
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
>  arch/arm64/kvm/hyp/nvhe/mem_protect.c | 32 ++++++++++++++++++++++-----
>  1 file changed, 26 insertions(+), 6 deletions(-)
>
> [...]
On Tue, Dec 14, 2021 at 02:48:30PM +0000, Andrew Walbran wrote:
> On Wed, 1 Dec 2021 at 17:04, 'Quentin Perret' via kernel-team
> <kernel-team@android.com> wrote:
> >
> > From: Will Deacon <will@kernel.org>
> >
> > In preparation for adding additional locked sections for manipulating
> > page-tables at EL2, introduce some simple wrappers around the host and
> > hypervisor locks so that it's a bit easier to read and a bit more
> > difficult to take the wrong lock (or even take them in the wrong order).
>
> Looks good, but how does this help prevent taking locks in the wrong order?

I just found that I would easily forget what exactly was protected by
"pkvm_pgd_lock" and so relating that back to "take host before hyp" was
error-prone. Having helpers with "host" and "hyp" in the name helps me
with that, at least.

Will
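[Editor's note: to make the convention concrete, the helper names encode a
fixed "host first, then hyp" acquisition order, released in reverse. The
sketch below is not from the patch; the function name and body are
hypothetical placeholders, and only the four wrapper calls come from the
patch itself.]

/*
 * Sketch of the locking discipline the wrappers encode. The body is a
 * hypothetical placeholder; only the lock/unlock calls mirror the patch.
 */
static int example_locked_section(void)
{
	int ret;

	host_lock_component();	/* always take the host lock first... */
	hyp_lock_component();	/* ...then the hyp lock (pkvm_pgd_lock) */

	ret = 0;		/* manipulate host/hyp stage-2 state here */

	hyp_unlock_component();	/* release in the reverse order */
	host_unlock_component();

	return ret;
}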
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index c1a90dd022b8..757dfefe3aeb 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -27,6 +27,26 @@ static struct hyp_pool host_s2_pool;
 
 const u8 pkvm_hyp_id = 1;
 
+static void host_lock_component(void)
+{
+	hyp_spin_lock(&host_kvm.lock);
+}
+
+static void host_unlock_component(void)
+{
+	hyp_spin_unlock(&host_kvm.lock);
+}
+
+static void hyp_lock_component(void)
+{
+	hyp_spin_lock(&pkvm_pgd_lock);
+}
+
+static void hyp_unlock_component(void)
+{
+	hyp_spin_unlock(&pkvm_pgd_lock);
+}
+
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
 	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
@@ -338,14 +358,14 @@ static int host_stage2_idmap(u64 addr)
 
 	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
 
-	hyp_spin_lock(&host_kvm.lock);
+	host_lock_component();
 	ret = host_stage2_adjust_range(addr, &range);
 	if (ret)
 		goto unlock;
 
 	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
 unlock:
-	hyp_spin_unlock(&host_kvm.lock);
+	host_unlock_component();
 
 	return ret;
 }
@@ -369,8 +389,8 @@ int __pkvm_host_share_hyp(u64 pfn)
 	if (!addr_is_memory(addr))
 		return -EINVAL;
 
-	hyp_spin_lock(&host_kvm.lock);
-	hyp_spin_lock(&pkvm_pgd_lock);
+	host_lock_component();
+	hyp_lock_component();
 
 	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL);
 	if (ret)
@@ -432,8 +452,8 @@ int __pkvm_host_share_hyp(u64 pfn)
 	BUG_ON(ret);
 
 unlock:
-	hyp_spin_unlock(&pkvm_pgd_lock);
-	hyp_spin_unlock(&host_kvm.lock);
+	hyp_unlock_component();
+	host_unlock_component();
 
 	return ret;
 }
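[Editor's note: as a possible follow-on, not part of this patch, helpers
that expect a component lock to already be held could document that
expectation with hyp_assert_lock_held() from
arch/arm64/kvm/hyp/include/nvhe/spinlock.h, which compiles to a no-op
unless CONFIG_NVHE_EL2_DEBUG is enabled. The helper below is a
hypothetical sketch, named for illustration only.]

/*
 * Hypothetical helper: it assumes its caller already took the host
 * component lock and asserts as much under CONFIG_NVHE_EL2_DEBUG.
 */
static int host_stage2_example_locked(u64 addr, u64 size)
{
	hyp_assert_lock_held(&host_kvm.lock);	/* caller must hold host lock */

	/* ... safely walk/update the host stage-2 page-table here ... */
	return 0;
}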