diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -712,6 +712,8 @@ nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p)
*p = 0;
rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3);
}
+ svc_encode_read_payload(rqstp, rqstp->rq_res.head[0].iov_len,
+ resp->len);
return 1;
} else
return xdr_ressize_check(rqstp, p);
@@ -737,6 +739,8 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p)
*p = 0;
rqstp->rq_res.tail[0].iov_len = 4 - (resp->count & 3);
}
+ svc_encode_read_payload(rqstp, rqstp->rq_res.head[0].iov_len,
+ resp->count);
return 1;
} else
return xdr_ressize_check(rqstp, p);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3547,6 +3547,8 @@ static __be32 nfsd4_encode_splice_read(
buf->page_len = 0;
return nfserr;
}
+ svc_encode_read_payload(read->rd_rqstp, buf->head[0].iov_len,
+ maxcount);
*(p++) = htonl(eof);
*(p++) = htonl(maxcount);
@@ -3713,6 +3715,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
xdr_truncate_encode(xdr, length_offset);
return nfserr;
}
+ svc_encode_read_payload(readlink->rl_rqstp, length_offset, maxcount);
wire_count = htonl(maxcount);
write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -462,6 +462,8 @@ nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p)
*p = 0;
rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3);
}
+ svc_encode_read_payload(rqstp, rqstp->rq_res.head[0].iov_len,
+ resp->len);
return 1;
}
@@ -482,6 +484,8 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p)
*p = 0;
rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3);
}
+ svc_encode_read_payload(rqstp, rqstp->rq_res.head[0].iov_len,
+ resp->count);
return 1;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -875,18 +875,9 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
if (wr_lst) {
/* XXX: Presume the client sent only one Write chunk */
- unsigned long offset;
- unsigned int length;
-
- if (rctxt->rc_read_payload_length) {
- offset = rctxt->rc_read_payload_offset;
- length = rctxt->rc_read_payload_length;
- } else {
- offset = xdr->head[0].iov_len;
- length = xdr->page_len;
- }
- ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
- length);
+ ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr,
+ rctxt->rc_read_payload_offset,
+ rctxt->rc_read_payload_length);
if (ret < 0)
goto err2;
svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
Have the NFSD encoders annotate the boundaries of every
direct-data-placement eligible READ data payload. Then change
svcrdma to use that annotation instead of the xdr->page_len
when handling Write chunks.

For NFSv4 on RDMA, that enables the ability to recognize multiple
READ payloads per compound. Next step is to support multiple Write
chunks.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 fs/nfsd/nfs3xdr.c                     |  4 ++++
 fs/nfsd/nfs4xdr.c                     |  3 +++
 fs/nfsd/nfsxdr.c                      |  4 ++++
 net/sunrpc/xprtrdma/svc_rdma_sendto.c | 15 +++------------
 4 files changed, 14 insertions(+), 12 deletions(-)
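
For readers unfamiliar with the mechanism, below is a minimal userspace
sketch of the annotation pattern this patch relies on. Only the names
svc_encode_read_payload, rc_read_payload_offset, and
rc_read_payload_length correspond to identifiers visible in the patch;
every other identifier is illustrative, and the sketch compiles as
ordinary C rather than kernel code.

/* Minimal model of the READ payload annotation. The two rc_ fields
 * mirror the svcrdma context fields the patch reads; everything else
 * is an assumption made for illustration.
 */
#include <stdio.h>

struct recv_ctxt {
	unsigned long rc_read_payload_offset;
	unsigned int  rc_read_payload_length;
};

/* Encoder side: record where the DDP-eligible payload sits in the
 * reply, analogous to the svc_encode_read_payload() calls above. */
static void encode_read_payload(struct recv_ctxt *rctxt,
				unsigned long offset, unsigned int length)
{
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;
}

/* Transport side: after this patch, the Write chunk path consumes the
 * recorded annotation instead of assuming the payload starts at
 * head[0].iov_len and spans xdr->page_len. */
static void send_write_chunk(const struct recv_ctxt *rctxt)
{
	printf("RDMA Write: offset=%lu, length=%u\n",
	       rctxt->rc_read_payload_offset,
	       rctxt->rc_read_payload_length);
}

int main(void)
{
	struct recv_ctxt rctxt = { 0, 0 };

	/* e.g. a READ reply whose data follows a 96-byte XDR header */
	encode_read_payload(&rctxt, 96, 4096);
	send_write_chunk(&rctxt);
	return 0;
}

The design point is that only the encoder knows precisely where a
DDP-eligible payload begins and ends. Once every "read"-type encoder
records that, the transport no longer has to infer the payload from
xdr->page_len, which is what makes multiple Write chunks per compound
tractable as a next step.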