Have the Call header decoder count the number of Write chunks it
finds and cache that count for use in the Send path. Currently, the
Linux NFS server implementation accepts only zero or one Write chunk
per RPC, but a subsequent patch will allow it to handle more than
one.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 include/linux/sunrpc/svc_rdma.h         |  1 +
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |  2 ++
 net/sunrpc/xprtrdma/svc_rdma_rw.c       |  2 +-
 net/sunrpc/xprtrdma/svc_rdma_sendto.c   | 10 +++++-----
 4 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -147,6 +147,7 @@ struct svc_rdma_recv_ctxt {
u32 rc_inv_rkey;
struct svc_rdma_payload rc_read_payload;
__be32 *rc_reply_chunk;
+ unsigned int rc_num_write_chunks;
struct page *rc_pages[RPCSVC_MAXPAGES];
};
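Note the design choice here: a presence test on rc_read_payload.rp_chunk can say only that a Write chunk exists, while a cached count also says how many, which the Send path will need once multiple Write chunks are allowed. A minimal userspace sketch of the pattern (hypothetical names, not the kernel structures):

	#include <stdbool.h>

	/* Hypothetical receive context: the decoder records how many
	 * Write chunks it saw; the send path tests the count rather
	 * than a pointer's presence.
	 */
	struct recv_ctxt {
		unsigned int num_write_chunks;	/* set at decode time */
	};

	static bool has_write_chunks(const struct recv_ctxt *ctxt)
	{
		/* Works unchanged when the limit grows past one. */
		return ctxt->num_write_chunks != 0;
	}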
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -194,6 +194,7 @@ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
out:
ctxt->rc_page_count = 0;
ctxt->rc_read_payload.rp_length = 0;
+ ctxt->rc_num_write_chunks = 0;
return ctxt;

out_empty:
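Receive contexts are recycled between RPCs, so each per-RPC field has to be re-initialized on every allocation; a stale count left over from the previous request would steer the reply down the wrong path. A sketch of that reset-on-get pattern, again with hypothetical names:

	#include <stdlib.h>

	struct recv_ctxt {
		unsigned int page_count;
		unsigned int num_write_chunks;
		struct recv_ctxt *next;		/* free-list linkage */
	};

	static struct recv_ctxt *free_list;

	/* Pop a recycled context (or allocate a fresh one), then clear
	 * every field that carries per-RPC state.
	 */
	static struct recv_ctxt *recv_ctxt_get(void)
	{
		struct recv_ctxt *ctxt = free_list;

		if (ctxt)
			free_list = ctxt->next;
		else
			ctxt = calloc(1, sizeof(*ctxt));
		if (!ctxt)
			return NULL;
		ctxt->page_count = 0;
		ctxt->num_write_chunks = 0;
		return ctxt;
	}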
@@ -488,6 +489,7 @@ static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
if (!p)
return false;
}
+ rctxt->rc_num_write_chunks = chcount;
if (!chcount)
rctxt->rc_read_payload.rp_chunk = NULL;
return chcount < 2;
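The "return chcount < 2" keeps the current zero-or-one restriction in place while the count itself is now preserved for the Send path. For readers unfamiliar with the wire format: in RPC-over-RDMA v1 (RFC 8166), the Write list is a chain of chunks, each introduced by a one-word "present" discriminator and terminated by a zero word, and each chunk is a counted array of segments (4-byte handle, 4-byte length, 8-byte offset). A self-contained sketch of such a counting decoder, not the kernel's implementation:

	#include <stddef.h>
	#include <stdint.h>
	#include <arpa/inet.h>	/* ntohl() */

	#define SEGMENT_WORDS 4	/* handle + length + 64-bit offset */

	/* Count the Write chunks in [p, end); -1 if malformed. */
	static int count_write_chunks(const uint32_t *p, const uint32_t *end)
	{
		int count = 0;

		while (p < end && ntohl(*p++) != 0) {
			uint32_t segcount;

			if (p >= end)
				return -1;
			segcount = ntohl(*p++);
			if ((size_t)(end - p) < (size_t)segcount * SEGMENT_WORDS)
				return -1;
			p += (size_t)segcount * SEGMENT_WORDS;
			count++;
		}
		return count;
	}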
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -625,7 +625,7 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
/* Send the page list in the Reply chunk only if the
* client did not provide Write chunks.
*/
- if (!rctxt->rc_read_payload.rp_chunk && xdr->page_len) {
+ if (!rctxt->rc_num_write_chunks && xdr->page_len) {
ret = svc_rdma_pages_write(info, xdr, xdr->head[0].iov_len,
xdr->page_len);
if (ret < 0)
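This hunk's rule in one sentence: a payload that has already been RDMA Written through a client-provided Write chunk must not also be pushed through the Reply chunk, or the client would receive it twice. A sketch of the routing decision under assumed names:

	#include <stddef.h>

	struct reply_buf {
		size_t head_len;	/* RPC results before the payload */
		size_t page_len;	/* bulk payload held in pages */
	};

	/* The head always travels in the Reply chunk; the page-list
	 * payload only when no Write chunk already carried it.
	 */
	static size_t reply_chunk_bytes(const struct reply_buf *buf,
					unsigned int num_write_chunks)
	{
		size_t len = buf->head_len;

		if (!num_write_chunks)
			len += buf->page_len;
		return len;
	}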
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -467,7 +467,7 @@ svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
ssize_t len, ret;

len = 0;
- if (rctxt->rc_read_payload.rp_chunk) {
+ if (rctxt->rc_num_write_chunks) {
ret = svc_rdma_encode_write_chunk(sctxt,
&rctxt->rc_read_payload);
if (ret < 0)
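For context: the reply's transport header echoes the client's Write list, with each segment length updated to the number of bytes the server actually wrote, and a zero discriminator closing the list (RFC 8166). A sketch of that wire encoding with a hypothetical helper, not the kernel's encoder:

	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl() */

	struct segment {
		uint32_t handle;
		uint32_t length;	/* bytes actually written */
		uint64_t offset;
	};

	/* Encode one Write chunk: "present" word, segment count, then
	 * each segment as handle, length, and 64-bit offset.
	 */
	static uint32_t *encode_write_chunk(uint32_t *p,
					    const struct segment *segs,
					    uint32_t nsegs)
	{
		uint32_t i;

		*p++ = htonl(1);	/* entry present */
		*p++ = htonl(nsegs);
		for (i = 0; i < nsegs; i++) {
			*p++ = htonl(segs[i].handle);
			*p++ = htonl(segs[i].length);
			*p++ = htonl((uint32_t)(segs[i].offset >> 32));
			*p++ = htonl((uint32_t)segs[i].offset);
		}
		return p;
	}

The list terminator is then a single htonl(0) word.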
@@ -564,7 +564,7 @@ static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
const struct svc_rdma_recv_ctxt *rctxt,
struct xdr_buf *xdr)
{
- bool read_payload_present = rctxt && rctxt->rc_read_payload.rp_chunk;
+ bool read_payload_present = rctxt && rctxt->rc_num_write_chunks;
int elements;

/* For small messages, copying bytes is cheaper than DMA mapping.
@@ -628,7 +628,7 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,

tailbase = xdr->tail[0].iov_base;
taillen = xdr->tail[0].iov_len;
- if (rctxt && rctxt->rc_read_payload.rp_chunk) {
+ if (rctxt && rctxt->rc_num_write_chunks) {
u32 xdrpad;

xdrpad = xdr_pad_size(xdr->page_len);
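Both sendto.c hunks above sit on the "pull-up" path: svc_rdma_pull_up_needed() decides whether the reply should be copied into a single contiguous send buffer, and svc_rdma_pull_up_reply_msg() performs that copy, skipping the page payload and its XDR pad when Write chunks already carried it. A sketch of the decision half, with illustrative thresholds rather than the kernel's constants:

	#include <stdbool.h>
	#include <stddef.h>

	#define PULLUP_THRESH	364	/* illustrative small-reply cutoff */
	#define MAX_SEND_SGES	8	/* illustrative device SGE limit */

	static bool needs_pullup(size_t reply_len, unsigned int elements)
	{
		/* Small reply: one memcpy beats several DMA mappings. */
		if (reply_len < PULLUP_THRESH)
			return true;
		/* Too scattered: not enough SGEs for every fragment. */
		return elements > MAX_SEND_SGES;
	}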
@@ -713,7 +713,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
* have added XDR padding in the tail buffer, and that
* should not be included inline.
*/
- if (rctxt && rctxt->rc_read_payload.rp_chunk) {
+ if (rctxt && rctxt->rc_num_write_chunks) {
base = xdr->tail[0].iov_base;
len = xdr->tail[0].iov_len;
xdr_pad = xdr_pad_size(xdr->page_len);
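This hunk and the previous two all hinge on XDR padding arithmetic: XDR rounds opaque data up to a 4-byte boundary, and when the page payload leaves via a Write chunk, the pad bytes that belong to it must be trimmed from the inline tail. The kernel's xdr_pad_size() computes the pad; a standalone equivalent:

	#include <assert.h>

	/* Zero-pad bytes needed to round len up to a 4-byte XDR
	 * boundary: 0 when aligned, otherwise 4 - (len % 4).
	 */
	static unsigned int pad_size(unsigned int len)
	{
		return -len & 3;
	}

	int main(void)
	{
		assert(pad_size(0) == 0);
		assert(pad_size(5) == 3);	/* 5 bytes pad to 8 */
		assert(pad_size(8) == 0);
		return 0;
	}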
@@ -952,7 +952,7 @@ int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
struct xdr_buf uninitialized_var(subbuf);
struct svcxprt_rdma *rdma;

- if (!rctxt->rc_read_payload.rp_chunk || !length)
+ if (!rctxt->rc_num_write_chunks || !length)
return 0;

/* XXX: Just one READ payload slot for now, since our