From patchwork Tue Oct 24 17:47:47 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Olga Kornievskaia
X-Patchwork-Id: 10025249
From: Olga Kornievskaia
To:
CC:
Subject: [PATCH v6 05/10] NFSD first draft of async copy
Date: Tue, 24 Oct 2017 13:47:47 -0400
Message-ID: <20171024174752.74910-6-kolga@netapp.com>
X-Mailer: git-send-email 2.10.1 (Apple Git-78)
In-Reply-To: <20171024174752.74910-1-kolga@netapp.com>
References: <20171024174752.74910-1-kolga@netapp.com>
Sender: linux-nfs-owner@vger.kernel.org
Precedence: bulk
List-ID:
X-Mailing-List: linux-nfs@vger.kernel.org

Upon receiving a request for an asynchronous copy, create a new kthread.
Since the request is asynchronous, copy the needed arguments/state off
the stack before starting the copy. Then start the thread and reply back
to the client, indicating the copy is asynchronous.

nfsd_copy_file_range() copies in a loop over the total number of bytes
that need to be copied. If a failure happens in the middle, we ignore the
error and return how much we have copied so far. Once done, create a work
item for the callback workqueue and send CB_OFFLOAD with the results.
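For readers following the control flow, here is a small userspace sketch of
the same pattern. The names (copy_req, start_async_copy, do_async_copy) are
hypothetical, and pthreads plus pread()/pwrite() stand in for the kthread and
nfsd_copy_file_range(), so this is only an analogue of the patch, not kernel
code: the arguments are duplicated off the requester's stack before the worker
starts, the worker loops until the requested bytes are copied or an error
occurs, and a partial failure still reports how much was written.

/*
 * Userspace analogue of the async-copy flow (illustrative only).
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct copy_req {
	int src_fd, dst_fd;
	off_t src_pos, dst_pos;
	size_t count;		/* bytes requested */
	size_t written;		/* bytes copied so far */
};

static void *do_async_copy(void *data)
{
	struct copy_req *req = data;
	char buf[64 * 1024];

	while (req->count > 0) {
		size_t chunk = req->count < sizeof(buf) ? req->count : sizeof(buf);
		ssize_t n = pread(req->src_fd, buf, chunk, req->src_pos);

		if (n <= 0)
			break;		/* ignore the error, report progress */
		n = pwrite(req->dst_fd, buf, n, req->dst_pos);
		if (n <= 0)
			break;
		req->src_pos += n;
		req->dst_pos += n;
		req->written += n;
		req->count -= n;
	}
	/* the kernel patch queues a CB_OFFLOAD callback at this point instead */
	printf("copied %zu bytes\n", req->written);
	free(req);
	return NULL;
}

/* "Reply" path: copy the caller's stack arguments, then detach a worker. */
static int start_async_copy(const struct copy_req *stack_args)
{
	struct copy_req *req = malloc(sizeof(*req));
	pthread_t tid;

	if (!req)
		return -1;
	*req = *stack_args;		/* analogous to dup_copy_fields() */
	if (pthread_create(&tid, NULL, do_async_copy, req)) {
		free(req);
		return -1;
	}
	return pthread_detach(tid);
}

int main(int argc, char **argv)
{
	struct copy_req args = { 0 };

	if (argc != 3)
		return 1;
	args.src_fd = open(argv[1], O_RDONLY);
	args.dst_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	args.count = 1 << 20;		/* copy up to 1 MiB for the demo */
	if (args.src_fd < 0 || args.dst_fd < 0 || start_async_copy(&args))
		return 1;
	sleep(1);			/* crude wait so the worker can finish */
	return 0;
}

Build with "gcc -pthread" and run with a source and a destination path. In the
patch itself, the notification at the end of the worker is the CB_OFFLOAD
callback queued from nfsd4_do_async_copy().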
---
 fs/nfsd/nfs4proc.c  | 172 ++++++++++++++++++++++++++++++++++++++++++++++------
 fs/nfsd/nfs4state.c |   2 +
 fs/nfsd/state.h     |   2 +
 fs/nfsd/xdr4.h      |   9 +++
 4 files changed, 166 insertions(+), 19 deletions(-)

diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index cb6e3ea..bdccfa9 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include <linux/kthread.h>
 
 #include "idmap.h"
 #include "cache.h"
@@ -1092,39 +1093,172 @@ static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
 out:
 	return status;
 }
 
+static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
+{
+	struct nfsd4_copy *copy = container_of(cb, struct nfsd4_copy, cp_cb);
+
+	atomic_dec(&copy->cp_clp->cl_refcount);
+	kfree(copy);
+}
+
+static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
+				 struct rpc_task *task)
+{
+	return 1;
+}
+
+static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
+	.release = nfsd4_cb_offload_release,
+	.done = nfsd4_cb_offload_done
+};
+
+static int nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
+{
+	memcpy(&copy->cp_res.cb_stateid, &copy->cp_dst_stateid,
+		sizeof(copy->cp_dst_stateid));
+	copy->cp_res.wr_stable_how = NFS_UNSTABLE;
+	copy->cp_consecutive = 1;
+	copy->cp_synchronous = sync;
+	gen_boot_verifier(&copy->cp_res.wr_verifier, copy->net);
+
+	return nfs_ok;
+}
+
+static int _nfsd_copy_file_range(struct nfsd4_copy *copy)
+{
+	ssize_t bytes_copied = 0;
+	size_t bytes_total = copy->cp_count;
+	u64 src_pos = copy->cp_src_pos;
+	u64 dst_pos = copy->cp_dst_pos;
+
+	do {
+		bytes_copied = nfsd_copy_file_range(copy->fh_src, src_pos,
+				copy->fh_dst, dst_pos, bytes_total);
+		if (bytes_copied <= 0)
+			break;
+		bytes_total -= bytes_copied;
+		copy->cp_res.wr_bytes_written += bytes_copied;
+		src_pos += bytes_copied;
+		dst_pos += bytes_copied;
+	} while (bytes_total > 0 && !copy->cp_synchronous);
+	return bytes_copied;
+}
+
+static int nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
+{
+	__be32 status;
+	ssize_t bytes;
+
+	bytes = _nfsd_copy_file_range(copy);
+	if (bytes < 0 && !copy->cp_res.wr_bytes_written)
+		status = nfserrno(bytes);
+	else
+		status = nfsd4_init_copy_res(copy, sync);
+
+	fput(copy->fh_src);
+	fput(copy->fh_dst);
+	return status;
+}
+
+static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
+{
+	memcpy(&dst->cp_src_stateid, &src->cp_src_stateid, sizeof(stateid_t));
+	memcpy(&dst->cp_dst_stateid, &src->cp_dst_stateid, sizeof(stateid_t));
+	dst->cp_src_pos = src->cp_src_pos;
+	dst->cp_dst_pos = src->cp_dst_pos;
+	dst->cp_count = src->cp_count;
+	dst->cp_consecutive = src->cp_consecutive;
+	dst->cp_synchronous = src->cp_synchronous;
+	memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
+	/* skipping nfsd4_callback */
+	memcpy(&dst->fh, &src->fh, sizeof(src->fh));
+	dst->net = src->net;
+	dst->cp_clp = src->cp_clp;
+	atomic_inc(&dst->cp_clp->cl_refcount);
+	dst->fh_dst = get_file(src->fh_dst);
+	dst->fh_src = get_file(src->fh_src);
+}
+
+static void cleanup_async_copy(struct nfsd4_copy *copy)
+{
+	fput(copy->fh_dst);
+	fput(copy->fh_src);
+	spin_lock(&copy->cp_clp->async_lock);
+	list_del(&copy->copies);
+	spin_unlock(&copy->cp_clp->async_lock);
+	atomic_dec(&copy->cp_clp->cl_refcount);
+	kfree(copy);
+}
+
+static int nfsd4_do_async_copy(void *data)
+{
+	struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
+	struct nfsd4_copy *cb_copy;
+
+	copy->nfserr = nfsd4_do_copy(copy, 0);
+	cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+	if (!cb_copy)
+		goto out;
+	memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
+	cb_copy->cp_clp = copy->cp_clp;
+	atomic_inc(&cb_copy->cp_clp->cl_refcount);
+	cb_copy->nfserr = copy->nfserr;
+	memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh));
+	nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp,
+			&nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
+	nfsd4_run_cb(&cb_copy->cp_cb);
+out:
+	cleanup_async_copy(copy);
+	return 0;
+}
+
 static __be32
 nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		union nfsd4_op_u *u)
 {
 	struct nfsd4_copy *copy = &u->copy;
-	struct file *src, *dst;
 	__be32 status;
-	ssize_t bytes;
+	struct nfsd4_copy *async_copy = NULL;
 
-	status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
-				   &copy->cp_dst_stateid, &dst);
+	status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
+				   &copy->fh_src, &copy->cp_dst_stateid,
+				   &copy->fh_dst);
 	if (status)
 		goto out;
 
-	bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
-			dst, copy->cp_dst_pos, copy->cp_count);
-
-	if (bytes < 0)
-		status = nfserrno(bytes);
-	else {
-		copy->cp_res.wr_bytes_written = bytes;
-		copy->cp_res.wr_stable_how = NFS_UNSTABLE;
-		copy->cp_consecutive = 1;
-		copy->cp_synchronous = 1;
-		gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
-		status = nfs_ok;
+	copy->cp_clp = cstate->clp;
+	memcpy(&copy->fh, &cstate->current_fh.fh_handle,
+		sizeof(struct knfsd_fh));
+	copy->net = SVC_NET(rqstp);
+	if (!copy->cp_synchronous) {
+		status = nfsd4_init_copy_res(copy, 0);
+		async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+		if (!async_copy) {
+			status = nfserrno(-ENOMEM);
+			goto out;
+		}
+		dup_copy_fields(copy, async_copy);
+		memcpy(&copy->cp_res.cb_stateid, &copy->cp_dst_stateid,
+			sizeof(copy->cp_dst_stateid));
+		spin_lock(&async_copy->cp_clp->async_lock);
+		list_add(&async_copy->copies,
+				&async_copy->cp_clp->async_copies);
+		spin_unlock(&async_copy->cp_clp->async_lock);
+		async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+				async_copy, "%s", "copy thread");
+		if (IS_ERR(async_copy->copy_task)) {
+			status = PTR_ERR(async_copy->copy_task);
+			goto out_err_dec;
+		}
+		wake_up_process(async_copy->copy_task);
+	} else {
+		status = nfsd4_do_copy(copy, 1);
 	}
-
-	fput(src);
-	fput(dst);
 out:
 	return status;
+out_err_dec:
+	cleanup_async_copy(async_copy);
+	goto out;
 }
 
 static __be32
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 0c04f81..d7767a1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1774,6 +1774,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
 #ifdef CONFIG_NFSD_PNFS
 	INIT_LIST_HEAD(&clp->cl_lo_states);
 #endif
+	INIT_LIST_HEAD(&clp->async_copies);
+	spin_lock_init(&clp->async_lock);
 	spin_lock_init(&clp->cl_lock);
 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
 	return clp;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index f8b0210..9189062 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -352,6 +352,8 @@ struct nfs4_client {
 	struct rpc_wait_queue	cl_cb_waitq;	/* backchannel callers may */
 						/* wait here for slots */
 	struct net		*net;
+	struct list_head	async_copies;	/* list of async copies */
+	spinlock_t		async_lock;	/* lock for async copies */
 };
 
 /* struct nfs4_client_reset
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 9b0c099..0a19954 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -529,6 +529,15 @@ struct nfsd4_copy {
 	struct nfsd4_callback	cp_cb;
 	__be32			nfserr;
 	struct knfsd_fh		fh;
+
+	struct nfs4_client	*cp_clp;
+
+	struct file		*fh_src;
+	struct file		*fh_dst;
+	struct net		*net;
+
+	struct list_head	copies;
+	struct task_struct	*copy_task;
 };
 
 struct nfsd4_seek {