From patchwork Fri Nov 22 15:40:29 2024
X-Patchwork-Submitter: Alice Ryhl
X-Patchwork-Id: 13883305
From: Alice Ryhl <aliceryhl@google.com>
Date: Fri, 22 Nov 2024 15:40:29 +0000
Subject: [PATCH v9 4/8] mm: rust: add lock_vma_under_rcu
Message-ID: <20241122-vma-v9-4-7127bfcdd54e@google.com>
In-Reply-To: <20241122-vma-v9-0-7127bfcdd54e@google.com>
References: <20241122-vma-v9-0-7127bfcdd54e@google.com>
X-Mailer: b4 0.13.0
To: Miguel Ojeda, Matthew Wilcox, Lorenzo Stoakes, Vlastimil Babka,
    John Hubbard,
Howlett" , Andrew Morton , Greg Kroah-Hartman , Arnd Bergmann , Christian Brauner , Jann Horn , Suren Baghdasaryan Cc: Alex Gaynor , Boqun Feng , Gary Guo , " =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= " , Benno Lossin , linux-kernel@vger.kernel.org, linux-mm@kvack.org, rust-for-linux@vger.kernel.org, Alice Ryhl , Andreas Hindborg X-Stat-Signature: hfxghia6e6byo44dykapfi7ojt6k1s5u X-Rspamd-Queue-Id: 52D3CA0008 X-Rspamd-Server: rspam08 X-Rspam-User: X-HE-Tag: 1732290033-391370 X-HE-Meta: U2FsdGVkX1/8gF4IO73splE/1jyQLlOuMb2L2sw/4HdEcZOjLQlCDAPz70nyTpJUkZVqgXDnBJ7zwkJQR+xKQO3TVN1VUVZTmOMd/bydmHhfl7ECT4bhm+EpQbFu541s28OogQnSE0HmFLbVZGmQKm9Lj+lAo1ySzp4RA9t0pcZTkLQoOPxF4BtdbX5cbW2LlKEOqRxQvGVHSybL03uH71yCTc/aJUndjVGMxbzSiXmK1//Ed1JJ5tMc/DTMXfy6D963UuyOJRRqPPGly0tEDbtKhO3ZA/3v6EkSfvhOGXOYMI3NFUiXi3UuL5Qa1Ju6CUApFcsOAUuZOulfEITHPC/oJ/yAEEMgCFJFS+l8dJbqoJMmgRrYng5hpoTvbpo4QvKVfqwOSDv/PNR1igT0J49zT3v3x1HxybBVUVggzCEVrbTlVT/M2vFR37WT8wlx7NiTg7BJhHgakts9x08CgAe4R6ztwypt2AQ8V+KdqdtHQczfuSfs1Fk6ZoA7HjHdIad2zJBH1I9YDSE5qUcGXxoiCW9v14ByHMhjS1EMqz0LnjoBGwBL55Ks1TrduTCXpV3xJPHqmNdGcPLb/0LXDgKVH8pnMIg1yYKB6fEMqS6y3jz8qxMNDjIN9uRf1CCBU8NFEZQX7yxYIg2OHI8w3JSxm75kkqfFkjYdxT4JZTDeOMp7AJyAHkCVEggfV8btg2ufd72RrOL16Oe9Pt51nezzDyoiAC4WQP2AvGOs4E+Mh3KiAnWxgO7Kmf+kqOXi05/yEZm19IZ+dnLxwv4MG1iWdzYhxGvtSAH/1SWxCAu2esYQE7po1Hc5Rz0t31p2JSKo1po5HR7++fRaudy9NV2l3I8wZ1zsf5E3YCNOOb8YtWr60AiEyt3xE2jZI0gQBuZi0mg4iafGSlayl/6V1Zu8lcwkwRbH3Pt3OohFj0NTdPqGDUhjLnldSLmT7QG6r65uWHvK8QB0YnVXrje Id3HOYRA RMxSsosM0BluMnhn1TfB70lPc/sULv1O/Wk6yZ9TIsfQYx6pwnc4jbQoVnCU5OOy1LWGAulTvsqRd5++WWfu/6aPltGj8W6+V165N2jv/QpG7PvOHE8rDAnq0f1H4+/4K/zRtVHM9suhZ4P5dudRFy/EVd1sSRiDBSGtDMrV2tzS5xAIJPbeGXRGUnNw8AinGccfsZvqcd4i7loB+tsx+aEbLrABtpAywEutP4F6rO5csKrXII6/5+RgqnvJw1Lur2C9vo/4PKtCNwK+YDNbyQrESitkA9xYBeWCZUi2zpC+zlhh4GbGQII9k4iKUvMsNbNVBv7oNZ6WwUboWywlXvBAT9XdVjIegBHmpiiKW3tjuOwCblZAVC3JDrg1YsXq3dhj3dWDVmPwdXVcM3k2xdWISqqs4ESwlS0Y2MmXVFqu1ozfPYwj35LCvBQTv7w3ExlkCHTUcV/wMtbfiXvFIeITQbWmrCPPBoIVdobve6+hU+UCo5MhQXAIrg25U8sL6rDaybA1ShUZgq5XnOkSUUCrohjxkw+mf/5ephTGnRn+cKP9aHzI0iPUGB8L9HS0rl9/r6SZ9o0BA6SbcOSDdEL+MJFXs2VoTKpt67MgLwckhaNrEPlX6GXaU8TspRoLHwmbGc6hJzkOd1fDRPcJyhfZSYYUha4CjfzNjYcs2nSJregk25r5PRrFWCywlMFtLzJ5ZZH1ffmX+pv8= X-Bogosity: Ham, tests=bogofilter, spamicity=0.000000, version=1.2.4 Sender: owner-linux-mm@kvack.org Precedence: bulk X-Loop: owner-majordomo@kvack.org List-ID: List-Subscribe: List-Unsubscribe: Currently, the binder driver always uses the mmap lock to make changes to its vma. Because the mmap lock is global to the process, this can involve significant contention. However, the kernel has a feature called per-vma locks, which can significantly reduce contention. For example, you can take a vma lock in parallel with an mmap write lock. This is important because contention on the mmap lock has been a long-term recurring challenge for the Binder driver. This patch introduces support for using `lock_vma_under_rcu` from Rust. The Rust Binder driver will be able to use this to reduce contention on the mmap lock. 
Acked-by: Lorenzo Stoakes (for mm bits)
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
 rust/helpers/mm.c |  5 +++++
 rust/kernel/mm.rs | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/rust/helpers/mm.c b/rust/helpers/mm.c
index 7b72eb065a3e..81b510c96fd2 100644
--- a/rust/helpers/mm.c
+++ b/rust/helpers/mm.c
@@ -43,3 +43,8 @@ struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm,
 {
 	return vma_lookup(mm, addr);
 }
+
+void rust_helper_vma_end_read(struct vm_area_struct *vma)
+{
+	vma_end_read(vma);
+}
diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
index ace8e7d57afe..425b73a9dfe6 100644
--- a/rust/kernel/mm.rs
+++ b/rust/kernel/mm.rs
@@ -13,6 +13,7 @@
 use core::{ops::Deref, ptr::NonNull};
 
 pub mod virt;
+use virt::VmAreaRef;
 
 /// A wrapper for the kernel's `struct mm_struct`.
 ///
@@ -170,6 +171,32 @@ pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser {
         unsafe { &*ptr.cast() }
     }
 
+    /// Attempt to access a vma using the vma read lock.
+    ///
+    /// This is an optimistic trylock operation, so it may fail if there is contention. In that
+    /// case, you should fall back to taking the mmap read lock.
+    ///
+    /// When per-vma locks are disabled, this always returns `None`.
+    #[inline]
+    pub fn lock_vma_under_rcu(&self, vma_addr: usize) -> Option<VmaReadGuard<'_>> {
+        #[cfg(CONFIG_PER_VMA_LOCK)]
+        {
+            // SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
+            // `mm_users` is non-zero.
+            let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr as _) };
+            if !vma.is_null() {
+                return Some(VmaReadGuard {
+                    // SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
+                    // valid vma. The vma is stable for as long as the vma read lock is held.
+                    vma: unsafe { VmAreaRef::from_raw(vma) },
+                    _nts: NotThreadSafe,
+                });
+            }
+        }
+
+        None
+    }
+
     /// Lock the mmap read lock.
     #[inline]
     pub fn mmap_read_lock(&self) -> MmapReadGuard<'_> {
@@ -238,3 +265,32 @@ fn drop(&mut self) {
         unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) };
     }
 }
+
+/// A guard for the vma read lock.
+///
+/// # Invariants
+///
+/// This `VmaReadGuard` guard owns the vma read lock.
+pub struct VmaReadGuard<'a> {
+    vma: &'a VmAreaRef,
+    // `vma_end_read` must be called on the same thread as where the lock was taken
+    _nts: NotThreadSafe,
+}
+
+// Make all `VmAreaRef` methods available on `VmaReadGuard`.
+impl Deref for VmaReadGuard<'_> {
+    type Target = VmAreaRef;
+
+    #[inline]
+    fn deref(&self) -> &VmAreaRef {
+        self.vma
+    }
+}
+
+impl Drop for VmaReadGuard<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: We hold the read lock by the type invariants.
+        unsafe { bindings::vma_end_read(self.vma.as_ptr()) };
+    }
+}