Message ID | 20171024174752.74910-6-kolga@netapp.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Nit: this could use a better subject line. On Tue, Oct 24, 2017 at 01:47:47PM -0400, Olga Kornievskaia wrote: ... > + if (!copy->cp_synchronous) { > + status = nfsd4_init_copy_res(copy, 0); > + async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); > + if (!async_copy) { > + status = nfserrno(-ENOMEM); > + goto out; > + } > + dup_copy_fields(copy, async_copy); > + memcpy(©->cp_res.cb_stateid, ©->cp_dst_stateid, > + sizeof(copy->cp_dst_stateid)); > + spin_lock(&async_copy->cp_clp->async_lock); > + list_add(&async_copy->copies, > + &async_copy->cp_clp->async_copies); > + spin_unlock(&async_copy->cp_clp->async_lock); At this point other threads could in theory look up this async_copy, but its copy_task field is not yet initialized. I don't *think* that's a problem for nfsd4_shutdown_copy, because I don't think the server could be processing rpc's for this client any more at that point. But I think a malicious client might be able to trigger a NULL dereference in nfsd4_offload_cancel. Is there any reason not to assign copy_task before adding it to this list? --b. 
> + async_copy->copy_task = kthread_create(nfsd4_do_async_copy, > + async_copy, "%s", "copy thread"); > + if (IS_ERR(async_copy->copy_task)) { > + status = PTR_ERR(async_copy->copy_task); > + goto out_err_dec; > + } > + wake_up_process(async_copy->copy_task); > + } else { > + status = nfsd4_do_copy(copy, 1); > } > - > - fput(src); > - fput(dst); > out: > return status; > +out_err_dec: > + cleanup_async_copy(async_copy); > + goto out; > } > > static __be32 > diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c > index 0c04f81..d7767a1 100644 > --- a/fs/nfsd/nfs4state.c > +++ b/fs/nfsd/nfs4state.c > @@ -1774,6 +1774,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) > #ifdef CONFIG_NFSD_PNFS > INIT_LIST_HEAD(&clp->cl_lo_states); > #endif > + INIT_LIST_HEAD(&clp->async_copies); > + spin_lock_init(&clp->async_lock); > spin_lock_init(&clp->cl_lock); > rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); > return clp; > diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h > index f8b0210..9189062 100644 > --- a/fs/nfsd/state.h > +++ b/fs/nfsd/state.h > @@ -352,6 +352,8 @@ struct nfs4_client { > struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ > /* wait here for slots */ > struct net *net; > + struct list_head async_copies; /* list of async copies */ > + spinlock_t async_lock; /* lock for async copies */ > }; > > /* struct nfs4_client_reset > diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h > index 9b0c099..0a19954 100644 > --- a/fs/nfsd/xdr4.h > +++ b/fs/nfsd/xdr4.h > @@ -529,6 +529,15 @@ struct nfsd4_copy { > struct nfsd4_callback cp_cb; > __be32 nfserr; > struct knfsd_fh fh; > + > + struct nfs4_client *cp_clp; > + > + struct file *fh_src; > + struct file *fh_dst; > + struct net *net; > + > + struct list_head copies; > + struct task_struct *copy_task; > }; > > struct nfsd4_seek { > -- > 1.8.3.1 -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More 
majordomo info at http://vger.kernel.org/majordomo-info.html
On Tue, Oct 24, 2017 at 01:47:47PM -0400, Olga Kornievskaia wrote: > + copy->cp_clp = cstate->clp; > + memcpy(©->fh, &cstate->current_fh.fh_handle, > + sizeof(struct knfsd_fh)); > + copy->net = SVC_NET(rqstp); > + if (!copy->cp_synchronous) { I was thinking we might want to do a synchronous copy anyway in some cases: e.g. if the copy is relatively small or if the filesystem supports clone. But I guess that's a premature optimization; better to keep this as you have it for now. --b. > + status = nfsd4_init_copy_res(copy, 0); > + async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); > + if (!async_copy) { > + status = nfserrno(-ENOMEM); > + goto out; > + } > + dup_copy_fields(copy, async_copy); > + memcpy(©->cp_res.cb_stateid, ©->cp_dst_stateid, > + sizeof(copy->cp_dst_stateid)); > + spin_lock(&async_copy->cp_clp->async_lock); > + list_add(&async_copy->copies, > + &async_copy->cp_clp->async_copies); > + spin_unlock(&async_copy->cp_clp->async_lock); > + async_copy->copy_task = kthread_create(nfsd4_do_async_copy, > + async_copy, "%s", "copy thread"); > + if (IS_ERR(async_copy->copy_task)) { > + status = PTR_ERR(async_copy->copy_task); > + goto out_err_dec; > + } > + wake_up_process(async_copy->copy_task); > + } else { > + status = nfsd4_do_copy(copy, 1); -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Thu, Jan 25, 2018 at 5:04 PM, J. Bruce Fields <bfields@fieldses.org> wrote: > Nit: this could use a better subject line. Will change it. > On Tue, Oct 24, 2017 at 01:47:47PM -0400, Olga Kornievskaia wrote: > ... >> + if (!copy->cp_synchronous) { >> + status = nfsd4_init_copy_res(copy, 0); >> + async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); >> + if (!async_copy) { >> + status = nfserrno(-ENOMEM); >> + goto out; >> + } >> + dup_copy_fields(copy, async_copy); >> + memcpy(©->cp_res.cb_stateid, ©->cp_dst_stateid, >> + sizeof(copy->cp_dst_stateid)); >> + spin_lock(&async_copy->cp_clp->async_lock); >> + list_add(&async_copy->copies, >> + &async_copy->cp_clp->async_copies); >> + spin_unlock(&async_copy->cp_clp->async_lock); > > At this point other threads could in theory look up this async_copy, but > its copy_task field is not yet initialized. I don't *think* that's a > problem for nfsd4_shutdown_copy, because I don't think the server could > be processing rpc's for this client any more at that point. But I think > a malicious client might be able to trigger a NULL dereference in > nfsd4_offload_cancel. > > Is there any reason not to assign copy_task before adding it to this > list? Good idea. I'll move the addition to after the copy_task assignment. > > --b. 
> >> + async_copy->copy_task = kthread_create(nfsd4_do_async_copy, >> + async_copy, "%s", "copy thread"); >> + if (IS_ERR(async_copy->copy_task)) { >> + status = PTR_ERR(async_copy->copy_task); >> + goto out_err_dec; >> + } >> + wake_up_process(async_copy->copy_task); >> + } else { >> + status = nfsd4_do_copy(copy, 1); >> } >> - >> - fput(src); >> - fput(dst); >> out: >> return status; >> +out_err_dec: >> + cleanup_async_copy(async_copy); >> + goto out; >> } >> >> static __be32 >> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c >> index 0c04f81..d7767a1 100644 >> --- a/fs/nfsd/nfs4state.c >> +++ b/fs/nfsd/nfs4state.c >> @@ -1774,6 +1774,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) >> #ifdef CONFIG_NFSD_PNFS >> INIT_LIST_HEAD(&clp->cl_lo_states); >> #endif >> + INIT_LIST_HEAD(&clp->async_copies); >> + spin_lock_init(&clp->async_lock); >> spin_lock_init(&clp->cl_lock); >> rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); >> return clp; >> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h >> index f8b0210..9189062 100644 >> --- a/fs/nfsd/state.h >> +++ b/fs/nfsd/state.h >> @@ -352,6 +352,8 @@ struct nfs4_client { >> struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ >> /* wait here for slots */ >> struct net *net; >> + struct list_head async_copies; /* list of async copies */ >> + spinlock_t async_lock; /* lock for async copies */ >> }; >> >> /* struct nfs4_client_reset >> diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h >> index 9b0c099..0a19954 100644 >> --- a/fs/nfsd/xdr4.h >> +++ b/fs/nfsd/xdr4.h >> @@ -529,6 +529,15 @@ struct nfsd4_copy { >> struct nfsd4_callback cp_cb; >> __be32 nfserr; >> struct knfsd_fh fh; >> + >> + struct nfs4_client *cp_clp; >> + >> + struct file *fh_src; >> + struct file *fh_dst; >> + struct net *net; >> + >> + struct list_head copies; >> + struct task_struct *copy_task; >> }; >> >> struct nfsd4_seek { >> -- >> 1.8.3.1 > -- > To unsubscribe from this list: send the line "unsubscribe 
linux-nfs" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Thu, Jan 25, 2018 at 5:29 PM, J. Bruce Fields <bfields@fieldses.org> wrote: > On Tue, Oct 24, 2017 at 01:47:47PM -0400, Olga Kornievskaia wrote: >> + copy->cp_clp = cstate->clp; >> + memcpy(©->fh, &cstate->current_fh.fh_handle, >> + sizeof(struct knfsd_fh)); >> + copy->net = SVC_NET(rqstp); >> + if (!copy->cp_synchronous) { > > I was thinking we might want to do a synchronous copy anyway in some > cases: e.g. if the copy is relatively small or if the filesystem > supports clone. > > But I guess that's a premature optimization; better to keep this as you > have it for now. Let's keep it as is for now and complicate it later :-) > > --b. > >> + status = nfsd4_init_copy_res(copy, 0); >> + async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); >> + if (!async_copy) { >> + status = nfserrno(-ENOMEM); >> + goto out; >> + } >> + dup_copy_fields(copy, async_copy); >> + memcpy(©->cp_res.cb_stateid, ©->cp_dst_stateid, >> + sizeof(copy->cp_dst_stateid)); >> + spin_lock(&async_copy->cp_clp->async_lock); >> + list_add(&async_copy->copies, >> + &async_copy->cp_clp->async_copies); >> + spin_unlock(&async_copy->cp_clp->async_lock); >> + async_copy->copy_task = kthread_create(nfsd4_do_async_copy, >> + async_copy, "%s", "copy thread"); >> + if (IS_ERR(async_copy->copy_task)) { >> + status = PTR_ERR(async_copy->copy_task); >> + goto out_err_dec; >> + } >> + wake_up_process(async_copy->copy_task); >> + } else { >> + status = nfsd4_do_copy(copy, 1); > -- > To unsubscribe from this list: send the line "unsubscribe linux-nfs" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
If I understand correctly (I may be wrong), once this patch is applied a COPY may fail that previously worked--because we're switching over to the asynchronous copy implementation before it's actually complete. Of course, that's fixed by the end of this series. But we try to avoid that situation, where functionality is temporarily broken in the middle of a patch series and then fixed later. Options might be to squash this patch together with some of the later patches. Or go ahead and add this code but don't actually enable it till later. (E.g. arrange that the "if (!copy->cp_synchronous)" case won't be taken till the last patch. Maybe it already works that way, I can't tell.) Or maybe there's some slicker way that I don't see right now. --b. -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Fri, Jan 26, 2018 at 4:34 PM, J. Bruce Fields <bfields@fieldses.org> wrote: > If I understand correctly (I may be wrong), once this patch is applied a > COPY may fail that previously worked--because we're switching over to > the asynchronous copy implementation before it's actually complete. I will have to double check this with testing but I think after this patch the asynchronous copy is functional but doesn't comply with the spec (eg., doesn't generate the unique stateid). > Of course, that's fixed by the end of this series. But we try to avoid > that situation, where functionality is temporarily broken in the middle > of a patch series and then fixed later. > > Options might be to squash this patch together with some of the later > patches. Or go ahead and add this code but don't actually enable it > till later. (E.g. arrange that the "if (!copy->cp_synchronous)" case > won't be taken till the last patch. Maybe it already works that way, I > can't tell.) Or maybe there's some slicker way that I don't see right > now. I could do if (!copy->cp_synchronous && 0) and then add a patch that removes 0. > > --b. > -- > To unsubscribe from this list: send the line "unsubscribe linux-nfs" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Fri, Feb 02, 2018 at 02:50:01PM -0500, Olga Kornievskaia wrote: > On Fri, Jan 26, 2018 at 4:34 PM, J. Bruce Fields <bfields@fieldses.org> wrote: > > If I understand correctly (I may be wrong), once this patch is applied a > > COPY may fail that previously worked--because we're switching over to > > the asynchronous copy implementation before it's actually complete. > > I will have to double check this with testing but I think after this > patch the asynchronous copy is functional but doesn't comply with the > spec (eg., doesn't generate the unique stateid). > > > Of course, that's fixed by the end of this series. But we try to avoid > > that situation, where functionality is temporarily broken in the middle > > of a patch series and then fixed later. > > > > Options might be to squash this patch together with some of the later > > patches. Or go ahead and add this code but don't actually enable it > > till later. (E.g. arrange that the "if (!copy->cp_synchronous)" case > > won't be taken till the last patch. Maybe it already works that way, I > > can't tell.) Or maybe there's some slicker way that I don't see right > > now. > > I could do if (!copy->cp_synchronous && 0) and then add a patch that removes 0. OK. I still don't see the "slick" solution, so let's go with that. --b. -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Thu, Jan 25, 2018 at 5:04 PM, J. Bruce Fields <bfields@fieldses.org> wrote: > Nit: this could use a better subject line. > > On Tue, Oct 24, 2017 at 01:47:47PM -0400, Olga Kornievskaia wrote: > ... >> + if (!copy->cp_synchronous) { >> + status = nfsd4_init_copy_res(copy, 0); >> + async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); >> + if (!async_copy) { >> + status = nfserrno(-ENOMEM); >> + goto out; >> + } >> + dup_copy_fields(copy, async_copy); >> + memcpy(©->cp_res.cb_stateid, ©->cp_dst_stateid, >> + sizeof(copy->cp_dst_stateid)); >> + spin_lock(&async_copy->cp_clp->async_lock); >> + list_add(&async_copy->copies, >> + &async_copy->cp_clp->async_copies); >> + spin_unlock(&async_copy->cp_clp->async_lock); > > At this point other threads could in theory look up this async_copy, but > its copy_task field is not yet initialized. I don't *think* that's a > problem for nfsd4_shutdown_copy, because I don't think the server could > be processing rpc's for this client any more at that point. But I think > a malicious client might be able to trigger a NULL dereference in > nfsd4_offload_cancel. > > Is there any reason not to assign copy_task before adding it to this > list? Now that I'm making changes I don't believe this is an issue. A client can't send nfsd4_offload_cancel() because it needs a copy stateid to send it with. And at this point the copy has not been replied to. > > --b. 
> >> + async_copy->copy_task = kthread_create(nfsd4_do_async_copy, >> + async_copy, "%s", "copy thread"); >> + if (IS_ERR(async_copy->copy_task)) { >> + status = PTR_ERR(async_copy->copy_task); >> + goto out_err_dec; >> + } >> + wake_up_process(async_copy->copy_task); >> + } else { >> + status = nfsd4_do_copy(copy, 1); >> } >> - >> - fput(src); >> - fput(dst); >> out: >> return status; >> +out_err_dec: >> + cleanup_async_copy(async_copy); >> + goto out; >> } >> >> static __be32 >> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c >> index 0c04f81..d7767a1 100644 >> --- a/fs/nfsd/nfs4state.c >> +++ b/fs/nfsd/nfs4state.c >> @@ -1774,6 +1774,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) >> #ifdef CONFIG_NFSD_PNFS >> INIT_LIST_HEAD(&clp->cl_lo_states); >> #endif >> + INIT_LIST_HEAD(&clp->async_copies); >> + spin_lock_init(&clp->async_lock); >> spin_lock_init(&clp->cl_lock); >> rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); >> return clp; >> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h >> index f8b0210..9189062 100644 >> --- a/fs/nfsd/state.h >> +++ b/fs/nfsd/state.h >> @@ -352,6 +352,8 @@ struct nfs4_client { >> struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ >> /* wait here for slots */ >> struct net *net; >> + struct list_head async_copies; /* list of async copies */ >> + spinlock_t async_lock; /* lock for async copies */ >> }; >> >> /* struct nfs4_client_reset >> diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h >> index 9b0c099..0a19954 100644 >> --- a/fs/nfsd/xdr4.h >> +++ b/fs/nfsd/xdr4.h >> @@ -529,6 +529,15 @@ struct nfsd4_copy { >> struct nfsd4_callback cp_cb; >> __be32 nfserr; >> struct knfsd_fh fh; >> + >> + struct nfs4_client *cp_clp; >> + >> + struct file *fh_src; >> + struct file *fh_dst; >> + struct net *net; >> + >> + struct list_head copies; >> + struct task_struct *copy_task; >> }; >> >> struct nfsd4_seek { >> -- >> 1.8.3.1 > -- > To unsubscribe from this list: send the line "unsubscribe 
linux-nfs" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Thu, Feb 15, 2018 at 02:59:14PM -0500, Olga Kornievskaia wrote: > On Thu, Jan 25, 2018 at 5:04 PM, J. Bruce Fields <bfields@fieldses.org> wrote: > > Nit: this could use a better subject line. > > > > On Tue, Oct 24, 2017 at 01:47:47PM -0400, Olga Kornievskaia wrote: > > ... > >> + if (!copy->cp_synchronous) { > >> + status = nfsd4_init_copy_res(copy, 0); > >> + async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); > >> + if (!async_copy) { > >> + status = nfserrno(-ENOMEM); > >> + goto out; > >> + } > >> + dup_copy_fields(copy, async_copy); > >> + memcpy(©->cp_res.cb_stateid, ©->cp_dst_stateid, > >> + sizeof(copy->cp_dst_stateid)); > >> + spin_lock(&async_copy->cp_clp->async_lock); > >> + list_add(&async_copy->copies, > >> + &async_copy->cp_clp->async_copies); > >> + spin_unlock(&async_copy->cp_clp->async_lock); > > > > At this point other threads could in theory look up this async_copy, but > > its copy_task field is not yet initialized. I don't *think* that's a > > problem for nfsd4_shutdown_copy, because I don't think the server could > > be processing rpc's for this client any more at that point. But I think > > a malicious client might be able to trigger a NULL dereference in > > nfsd4_offload_cancel. > > > > Is there any reason not to assign copy_task before adding it to this > > list? > > Now that I'm making changes I don't believe this is an issue. A client > can't send nfsd4_offload_cancel() because it needs a copy stateid to > send it with. And at this point the copy has not been replied to. Right, but a malicious client might guess that copy stateid before it gets the reply. We want to make sure we're safe from crashing even on input that is very unlikely. --b. -- To unsubscribe from this list: send the line "unsubscribe linux-nfs" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index cb6e3ea..bdccfa9 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -35,6 +35,7 @@ #include <linux/file.h> #include <linux/falloc.h> #include <linux/slab.h> +#include <linux/kthread.h> #include "idmap.h" #include "cache.h" @@ -1092,39 +1093,172 @@ static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write) out: return status; } +static void nfsd4_cb_offload_release(struct nfsd4_callback *cb) +{ + struct nfsd4_copy *copy = container_of(cb, struct nfsd4_copy, cp_cb); + + atomic_dec(&copy->cp_clp->cl_refcount); + kfree(copy); +} + +static int nfsd4_cb_offload_done(struct nfsd4_callback *cb, + struct rpc_task *task) +{ + return 1; +} + +static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = { + .release = nfsd4_cb_offload_release, + .done = nfsd4_cb_offload_done +}; + +static int nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync) +{ + memcpy(&copy->cp_res.cb_stateid, &copy->cp_dst_stateid, + sizeof(copy->cp_dst_stateid)); + copy->cp_res.wr_stable_how = NFS_UNSTABLE; + copy->cp_consecutive = 1; + copy->cp_synchronous = sync; + gen_boot_verifier(&copy->cp_res.wr_verifier, copy->net); + + return nfs_ok; +} + +static int _nfsd_copy_file_range(struct nfsd4_copy *copy) +{ + ssize_t bytes_copied = 0; + size_t bytes_total = copy->cp_count; + u64 src_pos = copy->cp_src_pos; + u64 dst_pos = copy->cp_dst_pos; + + do { + bytes_copied = nfsd_copy_file_range(copy->fh_src, src_pos, + copy->fh_dst, dst_pos, bytes_total); + if (bytes_copied <= 0) + break; + bytes_total -= bytes_copied; + copy->cp_res.wr_bytes_written += bytes_copied; + src_pos += bytes_copied; + dst_pos += bytes_copied; + } while (bytes_total > 0 && !copy->cp_synchronous); + return bytes_copied; +} + +static int nfsd4_do_copy(struct nfsd4_copy *copy, bool sync) +{ + __be32 status; + ssize_t bytes; + + bytes = _nfsd_copy_file_range(copy); + if (bytes < 0 && !copy->cp_res.wr_bytes_written) + status = nfserrno(bytes); + else + status = 
nfsd4_init_copy_res(copy, sync); + + fput(copy->fh_src); + fput(copy->fh_dst); + return status; +} + +static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst) +{ + memcpy(&dst->cp_src_stateid, &src->cp_src_stateid, sizeof(stateid_t)); + memcpy(&dst->cp_dst_stateid, &src->cp_dst_stateid, sizeof(stateid_t)); + dst->cp_src_pos = src->cp_src_pos; + dst->cp_dst_pos = src->cp_dst_pos; + dst->cp_count = src->cp_count; + dst->cp_consecutive = src->cp_consecutive; + dst->cp_synchronous = src->cp_synchronous; + memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res)); + /* skipping nfsd4_callback */ + memcpy(&dst->fh, &src->fh, sizeof(src->fh)); + dst->net = src->net; + dst->cp_clp = src->cp_clp; + atomic_inc(&dst->cp_clp->cl_refcount); + dst->fh_dst = get_file(src->fh_dst); + dst->fh_src = get_file(src->fh_src); +} + +static void cleanup_async_copy(struct nfsd4_copy *copy) +{ + fput(copy->fh_dst); + fput(copy->fh_src); + spin_lock(&copy->cp_clp->async_lock); + list_del(&copy->copies); + spin_unlock(&copy->cp_clp->async_lock); + atomic_dec(&copy->cp_clp->cl_refcount); + kfree(copy); +} + +static int nfsd4_do_async_copy(void *data) +{ + struct nfsd4_copy *copy = (struct nfsd4_copy *)data; + struct nfsd4_copy *cb_copy; + + copy->nfserr = nfsd4_do_copy(copy, 0); + cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); + if (!cb_copy) + goto out; + memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res)); + cb_copy->cp_clp = copy->cp_clp; + atomic_inc(&cb_copy->cp_clp->cl_refcount); + cb_copy->nfserr = copy->nfserr; + memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh)); + nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp, + &nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD); + nfsd4_run_cb(&cb_copy->cp_cb); +out: + cleanup_async_copy(copy); + return 0; +} static __be32 nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_copy *copy = &u->copy; - struct file *src, *dst; __be32 status; - ssize_t bytes; + struct nfsd4_copy 
*async_copy = NULL; - status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src, - &copy->cp_dst_stateid, &dst); + status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, + &copy->fh_src, &copy->cp_dst_stateid, + &copy->fh_dst); if (status) goto out; - bytes = nfsd_copy_file_range(src, copy->cp_src_pos, - dst, copy->cp_dst_pos, copy->cp_count); - - if (bytes < 0) - status = nfserrno(bytes); - else { - copy->cp_res.wr_bytes_written = bytes; - copy->cp_res.wr_stable_how = NFS_UNSTABLE; - copy->cp_consecutive = 1; - copy->cp_synchronous = 1; - gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp)); - status = nfs_ok; + copy->cp_clp = cstate->clp; + memcpy(&copy->fh, &cstate->current_fh.fh_handle, + sizeof(struct knfsd_fh)); + copy->net = SVC_NET(rqstp); + if (!copy->cp_synchronous) { + status = nfsd4_init_copy_res(copy, 0); + async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); + if (!async_copy) { + status = nfserrno(-ENOMEM); + goto out; + } + dup_copy_fields(copy, async_copy); + memcpy(&copy->cp_res.cb_stateid, &copy->cp_dst_stateid, + sizeof(copy->cp_dst_stateid)); + spin_lock(&async_copy->cp_clp->async_lock); + list_add(&async_copy->copies, + &async_copy->cp_clp->async_copies); + spin_unlock(&async_copy->cp_clp->async_lock); + async_copy->copy_task = kthread_create(nfsd4_do_async_copy, + async_copy, "%s", "copy thread"); + if (IS_ERR(async_copy->copy_task)) { + status = PTR_ERR(async_copy->copy_task); + goto out_err_dec; + } + wake_up_process(async_copy->copy_task); + } else { + status = nfsd4_do_copy(copy, 1); } - - fput(src); - fput(dst); out: return status; +out_err_dec: + cleanup_async_copy(async_copy); + goto out; } static __be32 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 0c04f81..d7767a1 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1774,6 +1774,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&clp->cl_lo_states); #endif + INIT_LIST_HEAD(&clp->async_copies); + 
spin_lock_init(&clp->async_lock); spin_lock_init(&clp->cl_lock); rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); return clp; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index f8b0210..9189062 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -352,6 +352,8 @@ struct nfs4_client { struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ /* wait here for slots */ struct net *net; + struct list_head async_copies; /* list of async copies */ + spinlock_t async_lock; /* lock for async copies */ }; /* struct nfs4_client_reset diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 9b0c099..0a19954 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -529,6 +529,15 @@ struct nfsd4_copy { struct nfsd4_callback cp_cb; __be32 nfserr; struct knfsd_fh fh; + + struct nfs4_client *cp_clp; + + struct file *fh_src; + struct file *fh_dst; + struct net *net; + + struct list_head copies; + struct task_struct *copy_task; }; struct nfsd4_seek {