diff mbox series

[v1,4/4] svcrdma: Clean up allocation of svc_rdma_rw_ctxt

Message ID 168597070368.7694.12360357990371202566.stgit@manet.1015granger.net (mailing list archive)
State Not Applicable
Headers show
Series NUMA memory optimizations for NFS/RDMA server | expand

Commit Message

Chuck Lever June 5, 2023, 1:11 p.m. UTC
From: Chuck Lever <chuck.lever@oracle.com>

The physical device's favored NUMA node ID is available when
allocating a rw_ctxt. Use that value instead of relying on the
allocating task happening to run on a node close to the device.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/svc_rdma_rw.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff mbox series

Patch

diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 11cf7c646644..068c365e7812 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -62,8 +62,8 @@  svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
 	if (node) {
 		ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
 	} else {
-		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
-			       GFP_KERNEL);
+		ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
+				    GFP_KERNEL, ibdev_to_node(rdma->sc_cm_id->device));
 		if (!ctxt)
 			goto out_noctx;
 
@@ -234,7 +234,8 @@  svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
 {
 	struct svc_rdma_write_info *info;
 
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	info = kmalloc_node(sizeof(*info), GFP_KERNEL,
+			    ibdev_to_node(rdma->sc_cm_id->device));
 	if (!info)
 		return info;
 
@@ -304,7 +305,8 @@  svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
 {
 	struct svc_rdma_read_info *info;
 
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	info = kmalloc_node(sizeof(*info), GFP_KERNEL,
+			    ibdev_to_node(rdma->sc_cm_id->device));
 	if (!info)
 		return info;