
[RFC,15/19] mm/util: Extend vm_account to charge pages against the pin cgroup

Message ID 76d61af0219560c03910a189f1ffa340d108858e.1674538665.git-series.apopple@nvidia.com (mailing list archive)
State New
Series mm: Introduce a cgroup to limit the amount of locked and pinned memory

Commit Message

Alistair Popple Jan. 24, 2023, 5:42 a.m. UTC
The vm_account_pinned() and vm_unaccount_pinned() functions currently only
account pages against pinned_vm/locked_vm and enforce limits against
RLIMIT_MEMLOCK. Extend them to also account pages against, and enforce the
limits of, the pin count cgroup.

Accounting of pages will fail if either the RLIMIT_MEMLOCK limit or the
cgroup limit is exceeded. Unlike rlimit enforcement, which can be bypassed
if the user has CAP_IPC_LOCK, cgroup limits cannot be bypassed.

Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
---
 include/linux/mm_types.h |  1 +
 mm/util.c                | 22 ++++++++++++++++++----
 2 files changed, 19 insertions(+), 4 deletions(-)

Patch

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7de2168..4adf8dc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1116,6 +1116,7 @@  struct vm_account {
 		struct mm_struct *mm;
 		struct user_struct *user;
 	} a;
+	struct pins_cgroup *pins_cg;
 	enum vm_account_flags flags;
 };
 
diff --git a/mm/util.c b/mm/util.c
index af40b1e..e5fb01a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -442,6 +442,7 @@  void vm_account_init(struct vm_account *vm_account, struct task_struct *task,
 		vm_account->a.mm = task->mm;
 	}
 
+	vm_account->pins_cg = get_pins_cg(task);
 	vm_account->flags = flags;
 }
 EXPORT_SYMBOL_GPL(vm_account_init);
@@ -459,6 +460,7 @@  void vm_account_release(struct vm_account *vm_account)
 		free_uid(vm_account->a.user);
 	else
 		mmdrop(vm_account->a.mm);
+	put_pins_cg(vm_account->pins_cg);
 }
 EXPORT_SYMBOL_GPL(vm_account_release);
 
@@ -489,6 +491,15 @@  static int vm_account_cmpxchg(struct vm_account *vm_account,
 	}
 }
 
+static void vm_unaccount_legacy(struct vm_account *vm_account,
+				unsigned long npages)
+{
+	if (vm_account->flags & VM_ACCOUNT_USER)
+		atomic_long_sub(npages, &vm_account->a.user->locked_vm);
+	else
+		atomic64_sub(npages, &vm_account->a.mm->pinned_vm);
+}
+
 int vm_account_pinned(struct vm_account *vm_account, unsigned long npages)
 {
 	unsigned long lock_limit = RLIM_INFINITY;
@@ -506,16 +517,19 @@  int vm_account_pinned(struct vm_account *vm_account, unsigned long npages)
 			return ret;
 	}
 
+	if (pins_try_charge(vm_account->pins_cg, npages)) {
+		vm_unaccount_legacy(vm_account, npages);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vm_account_pinned);
 
 void vm_unaccount_pinned(struct vm_account *vm_account, unsigned long npages)
 {
-	if (vm_account->flags & VM_ACCOUNT_USER)
-		atomic_long_sub(npages, &vm_account->a.user->locked_vm);
-	else
-		atomic64_sub(npages, &vm_account->a.mm->pinned_vm);
+	vm_unaccount_legacy(vm_account, npages);
+	pins_uncharge(vm_account->pins_cg, npages);
 }
 EXPORT_SYMBOL_GPL(vm_unaccount_pinned);
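
For context, below is a minimal sketch of how a driver might use the
vm_account API with this patch applied. It is illustrative only: the
struct and function names my_pin_ctx, my_pin_pages() and my_unpin_pages()
are hypothetical, and the third and fourth arguments to vm_account_init()
are assumed from earlier patches in this series rather than shown in this
one.

	#include <linux/mm_types.h>
	#include <linux/sched.h>

	struct my_pin_ctx {
		struct vm_account vm_account;
		unsigned long npages;
	};

	static int my_pin_pages(struct my_pin_ctx *ctx, unsigned long npages)
	{
		int ret;

		/*
		 * Take references on current's mm/user and pin cgroup for
		 * later accounting. The NULL user and VM_ACCOUNT_USER flag
		 * are assumptions based on earlier patches in the series.
		 */
		vm_account_init(&ctx->vm_account, current, NULL, VM_ACCOUNT_USER);

		/*
		 * Charges npages against RLIMIT_MEMLOCK and, with this patch,
		 * also against the pin cgroup. Returns -ENOMEM if the cgroup
		 * limit is exceeded, even if the caller has CAP_IPC_LOCK.
		 */
		ret = vm_account_pinned(&ctx->vm_account, npages);
		if (ret) {
			vm_account_release(&ctx->vm_account);
			return ret;
		}

		ctx->npages = npages;
		return 0;
	}

	static void my_unpin_pages(struct my_pin_ctx *ctx)
	{
		/* Drops both the pinned_vm/locked_vm count and the cgroup charge. */
		vm_unaccount_pinned(&ctx->vm_account, ctx->npages);

		/* Releases the references taken by vm_account_init(). */
		vm_account_release(&ctx->vm_account);
	}

Note the ordering in vm_account_pinned() above: the legacy rlimit charge
is made first and is rolled back via vm_unaccount_legacy() if the cgroup
charge fails, so callers only ever observe a fully charged or fully
uncharged state.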