diff mbox series

[RFC,v2,6/6] mm, migrc: Implement internal allocator to minimize impact onto vm

Message ID 20230817080559.43200-7-byungchul@sk.com (mailing list archive)
State New
Headers show
Series Reduce TLB flushes under some specific conditions | expand

Commit Message

Byungchul Park Aug. 17, 2023, 8:05 a.m. UTC
(I am not sure whether this patch brings a meaningful improvement. Please ignore it if it does not.)

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 mm/migrate.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index c57536a0b2a6..6b5113d5a1e2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -122,14 +122,33 @@  enum {
 	MIGRC_DST_PENDING,
 };
 
+#define MAX_MIGRC_REQ_NR	4096	/* capacity of the static request pool */
+static struct migrc_req migrc_req_pool_static[MAX_MIGRC_REQ_NR];	/* fixed backing store; entries are recycled, never freed */
+static atomic_t migrc_req_pool_idx = ATOMIC_INIT(-1);	/* index of the last handed-out slot; -1 == pool untouched */
+static LLIST_HEAD(migrc_req_pool_llist);	/* recycle list fed by free_migrc_req() */
+static DEFINE_SPINLOCK(migrc_req_pool_lock);	/* serializes llist_del_first() callers in alloc_migrc_req() */
+
 static struct migrc_req *alloc_migrc_req(void)
 {
-	return kmalloc(sizeof(struct migrc_req), GFP_KERNEL);
+	int idx = atomic_read(&migrc_req_pool_idx);	/* racy snapshot; re-validated after the increment below */
+	struct llist_node *n;
+
+	if (idx < MAX_MIGRC_REQ_NR - 1) {	/* static pool looks non-exhausted */
+		idx = atomic_inc_return(&migrc_req_pool_idx);	/* atomically claim the next slot; may overshoot under contention */
+		if (idx < MAX_MIGRC_REQ_NR)	/* overshooters fall through to the recycle list */
+			return migrc_req_pool_static + idx;
+	}
+
+	spin_lock(&migrc_req_pool_lock);	/* llist_del_first() requires a single consumer; NOTE(review): confirm no IRQ-context caller, else _irqsave is needed */
+	n = llist_del_first(&migrc_req_pool_llist);
+	spin_unlock(&migrc_req_pool_lock);
+
+	return n ? llist_entry(n, struct migrc_req, llnode) : NULL;	/* NULL once pool and recycle list are both empty (was GFP_KERNEL kmalloc) */
 }
 
 void free_migrc_req(struct migrc_req *req)
 {
-	kfree(req);
+	llist_add(&req->llnode, &migrc_req_pool_llist);	/* lockless push onto the recycle list; memory is never returned to the allocator */
 }
 
 static bool migrc_is_full(int nid)