Message ID | 20241211-vma-v11-4-466640428fc3@google.com (mailing list archive)
---|---
State | New
Series | Rust support for mm_struct, vm_area_struct, and mmap
"Alice Ryhl" <aliceryhl@google.com> writes: > Currently, the binder driver always uses the mmap lock to make changes > to its vma. Because the mmap lock is global to the process, this can > involve significant contention. However, the kernel has a feature called > per-vma locks, which can significantly reduce contention. For example, > you can take a vma lock in parallel with an mmap write lock. This is > important because contention on the mmap lock has been a long-term > recurring challenge for the Binder driver. > > This patch introduces support for using `lock_vma_under_rcu` from Rust. > The Rust Binder driver will be able to use this to reduce contention on > the mmap lock. > > Acked-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> (for mm bits) > Reviewed-by: Jann Horn <jannh@google.com> > Signed-off-by: Alice Ryhl <aliceryhl@google.com> > --- > rust/helpers/mm.c | 5 +++++ > rust/kernel/mm.rs | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ > 2 files changed, 61 insertions(+) > > diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c > index 7b72eb065a3e..81b510c96fd2 100644 > --- a/rust/helpers/mm.c > +++ b/rust/helpers/mm.c > @@ -43,3 +43,8 @@ struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm, > { > return vma_lookup(mm, addr); > } > + > +void rust_helper_vma_end_read(struct vm_area_struct *vma) > +{ > + vma_end_read(vma); > +} > diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs > index ace8e7d57afe..425b73a9dfe6 100644 > --- a/rust/kernel/mm.rs > +++ b/rust/kernel/mm.rs > @@ -13,6 +13,7 @@ > use core::{ops::Deref, ptr::NonNull}; > > pub mod virt; > +use virt::VmAreaRef; > > /// A wrapper for the kernel's `struct mm_struct`. > /// > @@ -170,6 +171,32 @@ pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser { > unsafe { &*ptr.cast() } > } > > + /// Attempt to access a vma using the vma read lock. > + /// > + /// This is an optimistic trylock operation, so it may fail if there is contention. In that > + /// case, you should fall back to taking the mmap read lock. > + /// > + /// When per-vma locks are disabled, this always returns `None`. > + #[inline] > + pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> { > + #[cfg(CONFIG_PER_VMA_LOCK)] > + { > + // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where > + // `mm_users` is non-zero. > + let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr as _) }; Is `as _` the right approach here? Best regards, Andreas Hindborg
diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c
index 7b72eb065a3e..81b510c96fd2 100644
--- a/rust/helpers/mm.c
+++ b/rust/helpers/mm.c
@@ -43,3 +43,8 @@ struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm,
 {
 	return vma_lookup(mm, addr);
 }
+
+void rust_helper_vma_end_read(struct vm_area_struct *vma)
+{
+	vma_end_read(vma);
+}
diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
index ace8e7d57afe..425b73a9dfe6 100644
--- a/rust/kernel/mm.rs
+++ b/rust/kernel/mm.rs
@@ -13,6 +13,7 @@
 use core::{ops::Deref, ptr::NonNull};
 
 pub mod virt;
+use virt::VmAreaRef;
 
 /// A wrapper for the kernel's `struct mm_struct`.
 ///
@@ -170,6 +171,32 @@ pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
         unsafe { &*ptr.cast() }
     }
 
+    /// Attempt to access a vma using the vma read lock.
+    ///
+    /// This is an optimistic trylock operation, so it may fail if there is contention. In that
+    /// case, you should fall back to taking the mmap read lock.
+    ///
+    /// When per-vma locks are disabled, this always returns `None`.
+    #[inline]
+    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
+        #[cfg(CONFIG_PER_VMA_LOCK)]
+        {
+            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
+            // `mm_users` is non-zero.
+            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr as _) };
+            if !vma.is_null() {
+                return Some(VmaReadGuard {
+                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
+                    // valid vma. The vma is stable for as long as the vma read lock is held.
+                    vma: unsafe { VmAreaRef::from_raw(vma) },
+                    _nts: NotThreadSafe,
+                });
+            }
+        }
+
+        None
+    }
+
     /// Lock the mmap read lock.
     #[inline]
     pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
@@ -238,3 +265,32 @@ fn drop(&mut self) {
         unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
     }
 }
+
+/// A guard for the vma read lock.
+///
+/// # Invariants
+///
+/// This `VmaReadGuard` guard owns the vma read lock.
+pub struct VmaReadGuard<'a> {
+    vma: &'a VmAreaRef,
+    // `vma_end_read` must be called on the same thread as where the lock was taken
+    _nts: NotThreadSafe,
+}
+
+// Make all `VmAreaRef` methods available on `VmaReadGuard`.
+impl Deref for VmaReadGuard<'_> {
+    type Target = VmAreaRef;
+
+    #[inline]
+    fn deref(&self) -> &VmAreaRef {
+        self.vma
+    }
+}
+
+impl Drop for VmaReadGuard<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: We hold the read lock by the type invariants.
+        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
+    }
+}
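As a usage illustration (not part of the patch): the doc comment on `lock_vma_under_rcu` prescribes trying the per-vma lock first and falling back to the mmap read lock when it fails. A hedged sketch of that pattern in a caller; the `vma_lookup` method on the mmap read guard is an assumption based on the rest of this series, and `use_vma` is a hypothetical stand-in for real per-vma work:

```rust
use kernel::mm::{virt::VmAreaRef, MmWithUser};
use kernel::prelude::*;

/// Hypothetical helper standing in for whatever the driver does with the vma.
fn use_vma(_vma: &VmAreaRef) {}

/// Sketch: optimistic per-vma read lock with mmap-read-lock fallback.
fn access_vma(mm: &MmWithUser, addr: usize) -> Result {
    // Fast path: per-vma read lock. Returns `None` under contention, and
    // always when CONFIG_PER_VMA_LOCK is disabled.
    if let Some(vma) = mm.lock_vma_under_rcu(addr) {
        use_vma(&vma); // the guard derefs to `VmAreaRef`
        return Ok(()); // vma read lock released when the guard drops
    }

    // Slow path: take the process-wide mmap read lock instead.
    let mmap = mm.mmap_read_lock();
    // Assumption: the mmap read guard exposes `vma_lookup`, as elsewhere
    // in this series.
    let vma = mmap.vma_lookup(addr).ok_or(EFAULT)?;
    use_vma(vma);
    Ok(()) // mmap read lock released when `mmap` drops
}
```

The guard-based design means both paths release their lock automatically on every return path, which is the point of wrapping `vma_end_read` in `VmaReadGuard::drop`.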