--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
return tot_pages;
}
-int rds_rdma_extra_size(struct rds_rdma_args *args)
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+ struct rds_iov_vector *iov)
{
- struct rds_iovec vec;
+ struct rds_iovec *vec;
struct rds_iovec __user *local_vec;
int tot_pages = 0;
unsigned int nr_pages;
@@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
if (args->nr_local == 0)
return -EINVAL;
+ iov->iov = kcalloc(args->nr_local,
+ sizeof(struct rds_iovec),
+ GFP_KERNEL);
+ if (!iov->iov)
+ return -ENOMEM;
+
+ vec = &iov->iov[0];
+
+ if (copy_from_user(vec, local_vec, args->nr_local *
+ sizeof(struct rds_iovec)))
+ return -EFAULT;
+ iov->len = args->nr_local;
+
/* figure out the number of pages in the vector */
- for (i = 0; i < args->nr_local; i++) {
- if (copy_from_user(&vec, &local_vec[i],
- sizeof(struct rds_iovec)))
- return -EFAULT;
+ for (i = 0; i < args->nr_local; i++, vec++) {
- nr_pages = rds_pages_in_vec(&vec);
+ nr_pages = rds_pages_in_vec(vec);
if (nr_pages == 0)
return -EINVAL;
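
With this change rds_rdma_extra_size() copies the whole user iovec array once into a caller-supplied rds_iov_vector and then walks the kernel-side copy, so the page count computed here describes exactly the iovecs that rds_cmsg_rdma_args() will map later. A minimal sketch of the intended calling pattern, with error handling trimmed and the local names 'args' and 'extra' assumed for illustration; the caller, not the helper, owns and frees iov.iov:

        /* Sketch only: how a caller drives the reworked helper. */
        struct rds_iov_vector iov = { 0 };
        int extra;

        extra = rds_rdma_extra_size(args, &iov); /* kcallocs iov.iov, copies the user iovecs */
        if (extra >= 0) {
                /* ... reserve 'extra' bytes of rdma headroom in the rds_message ... */
        }
        kfree(iov.iov); /* caller-side cleanup; iov.iov stays NULL if the kcalloc itself failed */
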
@@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
* Extract all arguments and set up the rdma_op
*/
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg)
+ struct cmsghdr *cmsg,
+ struct rds_iov_vector *vec)
{
struct rds_rdma_args *args;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
- struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
- int iov_size;
+ struct rds_iovec *iovs;
unsigned int i, j;
int ret = 0;
@@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
goto out_ret;
}
- /* Check whether to allocate the iovec area */
- iov_size = args->nr_local * sizeof(struct rds_iovec);
- if (args->nr_local > UIO_FASTIOV) {
- iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
- if (!iovs) {
- ret = -ENOMEM;
- goto out_ret;
- }
+ if (vec->len != args->nr_local) {
+ ret = -EINVAL;
+ goto out_ret;
}
- if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
- ret = -EFAULT;
- goto out;
- }
+ iovs = vec->iov;
nr_pages = rds_rdma_pages(iovs, args->nr_local);
if (nr_pages < 0) {
ret = -EINVAL;
- goto out;
+ goto out_ret;
}
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
- goto out;
+ goto out_ret;
}
op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
@@ -623,7 +626,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
if (!op->op_sg) {
ret = -ENOMEM;
- goto out;
+ goto out_pages;
}
if (op->op_notify || op->op_recverr) {
@@ -635,7 +638,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
if (!op->op_notifier) {
ret = -ENOMEM;
- goto out;
+ goto out_pages;
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
@@ -681,7 +684,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
*/
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
- goto out;
+ goto out_pages;
else
ret = 0;
@@ -714,13 +717,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
nr_bytes,
(unsigned int) args->remote_vec.bytes);
ret = -EINVAL;
- goto out;
+ goto out_pages;
}
op->op_bytes = nr_bytes;
-out:
- if (iovs != iovstack)
- sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
+out_pages:
kfree(pages);
out_ret:
if (ret)
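
On the consumer side, rds_cmsg_rdma_args() no longer touches userspace at all: it borrows the vec->iov array that rds_rm_size() already copied in, and a vec->len / args->nr_local mismatch is rejected with -EINVAL because it means the cmsg no longer matches what was sized. With the sock_kmalloc'd iovec gone, the old out: label collapses into the two labels below; a sketch of the resulting unwind, with the pre-existing out_ret body elided rather than guessed:

        /* Sketch of the error unwind in rds_cmsg_rdma_args() after this patch. */
        out_pages:
                kfree(pages);   /* only the page-pointer array; vec->iov stays owned by rds_sendmsg() */
        out_ret:
                ...             /* pre-existing "if (ret)" failure handling, unchanged by this patch */
                return ret;
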
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
INIT_LIST_HEAD(&q->zcookie_head);
}
+struct rds_iov_vector {
+ struct rds_iovec *iov;
+ int len;
+};
+
+struct rds_iov_vector_arr {
+ struct rds_iov_vector *vec;
+ int len;
+ int indx;
+ int incr;
+};
+
struct rds_message {
refcount_t m_refcount;
struct list_head m_sock_item;
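
The two structs added to rds.h carry the per-message iovec state: rds_iov_vector holds the kernel copy of one RDMA cmsg's iovec array, and rds_iov_vector_arr is a growable array of those vectors, with len entries allocated, indx entries populated, and incr as the growth step. A hypothetical helper (not part of the patch, name invented here) that mirrors the cleanup loops rds_sendmsg() gains below illustrates the ownership rule: every populated entry owns its ->iov allocation.

        /* Hypothetical helper matching rds_sendmsg()'s cleanup loops. */
        static void rds_iov_vector_arr_free(struct rds_iov_vector_arr *vct)
        {
                int i;

                for (i = 0; i < vct->indx; i++)
                        kfree(vct->vec[i].iov); /* each populated entry owns its iovec copy */
                kfree(vct->vec);                /* then the array of vectors itself */
        }
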
@@ -904,13 +916,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_rdma_extra_size(struct rds_rdma_args *args);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+ struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
+ struct cmsghdr *cmsg,
+ struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -876,13 +876,15 @@ static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
* rds_message is getting to be quite complicated, and we'd like to allocate
* it all in one go. This figures out how big it needs to be up front.
*/
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+ struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
int size = 0;
int cmsg_groups = 0;
int retval;
bool zcopy_cookie = false;
+ struct rds_iov_vector *iov, *tmp_iov;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
@@ -893,8 +895,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
+ if (vct->indx >= vct->len) {
+ vct->len += vct->incr;
+ tmp_iov =
+ krealloc(vct->vec,
+ vct->len *
+ sizeof(struct rds_iov_vector),
+ GFP_KERNEL);
+ if (!tmp_iov) {
+ vct->len -= vct->incr;
+ return -ENOMEM;
+ }
+ vct->vec = tmp_iov;
+ }
+ iov = &vct->vec[vct->indx];
+ memset(iov, 0, sizeof(struct rds_iov_vector));
+ vct->indx++;
cmsg_groups |= 1;
- retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+ retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
if (retval < 0)
return retval;
size += retval;
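
The growth step in rds_rm_size() relies on the usual krealloc() contract: on failure it returns NULL and leaves the old buffer intact, so only vct->len needs to be rolled back before returning -ENOMEM, while vct->vec and the entries already populated remain valid for the caller to free. A stripped-down sketch of the same idiom, using the field names from the patch:

        /* Sketch: grow the vector array by vct->incr, rolling back len on failure. */
        if (vct->indx >= vct->len) {
                struct rds_iov_vector *tmp_iov;

                vct->len += vct->incr;
                tmp_iov = krealloc(vct->vec, vct->len * sizeof(*tmp_iov), GFP_KERNEL);
                if (!tmp_iov) {
                        vct->len -= vct->incr;  /* old vct->vec is still valid and still owned */
                        return -ENOMEM;
                }
                vct->vec = tmp_iov;
        }
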
@@ -951,10 +969,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
}
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
- struct msghdr *msg, int *allocated_mr)
+ struct msghdr *msg, int *allocated_mr,
+ struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
- int ret = 0;
+ int ret = 0, ind = 0;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
@@ -968,7 +987,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
*/
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
- ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+ if (ind >= vct->indx)
+ return -ENOMEM;
+ ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+ ind++;
break;
case RDS_CMSG_RDMA_DEST:
@@ -1084,6 +1106,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
int num_sgs = ceil(payload_len, PAGE_SIZE);
int namelen;
+ struct rds_iov_vector_arr vct = {0};
+ int ind;
+
+ /* Expect one RDMA CMSG per rds_sendmsg; the array can still grow if more are needed. */
+ vct.incr = 1;
/* Mirror Linux UDP's handling of BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
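
Taken together, the send.c changes keep the whole vector array on the rds_sendmsg() stack frame: it starts empty with incr = 1, rds_rm_size() fills one entry per RDS_CMSG_RDMA_ARGS while sizing the message, rds_cmsg_send() consumes the entries in the same cmsg order, and both the success and the error exits free everything. A condensed sketch of that lifecycle, using the variable names from rds_sendmsg() and omitting the unrelated logic in between:

        /* Sketch of the vct lifecycle inside rds_sendmsg(). */
        struct rds_iov_vector_arr vct = {0};
        int ind;

        vct.incr = 1;                                   /* usually a single RDMA cmsg per send */
        ret = rds_rm_size(msg, num_sgs, &vct);          /* copies user iovecs, adds to rm size */
        /* ... allocate rm, queue data, then ... */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct); /* builds ops from vct.vec[] */

        for (ind = 0; ind < vct.indx; ind++)            /* identical cleanup on success and error */
                kfree(vct.vec[ind].iov);
        kfree(vct.vec);
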
@@ -1220,7 +1247,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
}
/* size of rm including all sgs */
- ret = rds_rm_size(msg, num_sgs);
+ ret = rds_rm_size(msg, num_sgs, &vct);
if (ret < 0)
goto out;
@@ -1270,7 +1297,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
rm->m_conn_path = cpath;
/* Parse any control messages the user may have included. */
- ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
if (ret) {
/* Trigger connection so that its ready for the next retry */
if (ret == -EAGAIN)
@@ -1348,9 +1375,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
if (ret)
goto out;
rds_message_put(rm);
+
+ for (ind = 0; ind < vct.indx; ind++)
+ kfree(vct.vec[ind].iov);
+ kfree(vct.vec);
+
return payload_len;
out:
+ for (ind = 0; ind < vct.indx; ind++)
+ kfree(vct.vec[ind].iov);
+ kfree(vct.vec);
+
/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
* If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
* or in any other way, we need to destroy the MR again */