From patchwork Wed Aug  6 17:44:24 2014
X-Patchwork-Submitter: Trond Myklebust
X-Patchwork-Id: 4687711
From: Trond Myklebust <trond.myklebust@primarydata.com>
To: Bruce Fields
Cc: linux-nfs@vger.kernel.org
Subject: [PATCH v2 5/6] nfsd: split DRC global spinlock into per-bucket locks
Date: Wed,  6 Aug 2014 13:44:24 -0400
Message-Id: <1407347065-17463-6-git-send-email-trond.myklebust@primarydata.com>
X-Mailer: git-send-email 1.9.3
In-Reply-To: <1407347065-17463-5-git-send-email-trond.myklebust@primarydata.com>
References: <1407347065-17463-1-git-send-email-trond.myklebust@primarydata.com>
 <1407347065-17463-2-git-send-email-trond.myklebust@primarydata.com>
 <1407347065-17463-3-git-send-email-trond.myklebust@primarydata.com>
 <1407347065-17463-4-git-send-email-trond.myklebust@primarydata.com>
 <1407347065-17463-5-git-send-email-trond.myklebust@primarydata.com>

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfsd/nfscache.c | 43 ++++++++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 23 deletions(-)

diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index dc909091349b..74603654b7f9 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -29,6 +29,7 @@
 struct nfsd_drc_bucket {
 	struct list_head lru_head;
+	spinlock_t cache_lock;
 };
 
 static struct nfsd_drc_bucket	*drc_hashtbl;
@@ -79,7 +80,6 @@ static struct shrinker nfsd_reply_cache_shrinker = {
  * A cache entry is "single use" if c_state == RC_INPROG
  * Otherwise, it when accessing _prev or _next, the lock must be held.
  */
-static DEFINE_SPINLOCK(cache_lock);
 static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
 /*
@@ -154,11 +154,11 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 }
 
 static void
-nfsd_reply_cache_free(struct svc_cacherep *rp)
+nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
 {
-	spin_lock(&cache_lock);
+	spin_lock(&b->cache_lock);
 	nfsd_reply_cache_free_locked(rp);
-	spin_unlock(&cache_lock);
+	spin_unlock(&b->cache_lock);
 }
 
 int nfsd_reply_cache_init(void)
@@ -180,8 +180,10 @@ int nfsd_reply_cache_init(void)
 	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
 	if (!drc_hashtbl)
 		goto out_nomem;
-	for (i = 0; i < hashsize; i++)
+	for (i = 0; i < hashsize; i++) {
 		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
+		spin_lock_init(&drc_hashtbl[i].cache_lock);
+	}
 	drc_hashsize = hashsize;
 
 	return 0;
@@ -265,9 +267,13 @@ prune_cache_entries(void)
 
 	for (i = 0; i < drc_hashsize; i++) {
 		struct nfsd_drc_bucket *b = &drc_hashtbl[i];
+		if (list_empty(&b->lru_head))
+			continue;
+		spin_lock(&b->cache_lock);
 		freed += prune_bucket(b);
 		if (!list_empty(&b->lru_head))
 			cancel = false;
+		spin_unlock(&b->cache_lock);
 	}
 
 	/*
@@ -282,9 +288,7 @@ prune_cache_entries(void)
 static void
 cache_cleaner_func(struct work_struct *unused)
 {
-	spin_lock(&cache_lock);
 	prune_cache_entries();
-	spin_unlock(&cache_lock);
 }
 
 static unsigned long
@@ -296,12 +300,7 @@ nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 static unsigned long
 nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	unsigned long freed;
-
-	spin_lock(&cache_lock);
-	freed = prune_cache_entries();
-	spin_unlock(&cache_lock);
-	return freed;
+	return prune_cache_entries();
 }
 /*
  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
@@ -426,14 +425,14 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	 * preallocate an entry.
 	 */
 	rp = nfsd_reply_cache_alloc();
-	spin_lock(&cache_lock);
+	spin_lock(&b->cache_lock);
 	if (likely(rp)) {
 		atomic_inc(&num_drc_entries);
 		drc_mem_usage += sizeof(*rp);
 	}
 
 	/* go ahead and prune the cache */
-	prune_cache_entries();
+	prune_bucket(b);
 
 	found = nfsd_cache_search(b, rqstp, csum);
 	if (found) {
@@ -470,7 +469,7 @@
 	}
 	rp->c_type = RC_NOCACHE;
 out:
-	spin_unlock(&cache_lock);
+	spin_unlock(&b->cache_lock);
 	return rtn;
 
 found_entry:
@@ -548,7 +547,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 
 	/* Don't cache excessive amounts of data and XDR failures */
 	if (!statp || len > (256 >> 2)) {
-		nfsd_reply_cache_free(rp);
+		nfsd_reply_cache_free(b, rp);
 		return;
 	}
 
@@ -563,23 +562,23 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 		bufsize = len << 2;
 		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
 		if (!cachv->iov_base) {
-			nfsd_reply_cache_free(rp);
+			nfsd_reply_cache_free(b, rp);
 			return;
 		}
 		cachv->iov_len = bufsize;
 		memcpy(cachv->iov_base, statp, bufsize);
 		break;
 	case RC_NOCACHE:
-		nfsd_reply_cache_free(rp);
+		nfsd_reply_cache_free(b, rp);
 		return;
 	}
-	spin_lock(&cache_lock);
+	spin_lock(&b->cache_lock);
 	drc_mem_usage += bufsize;
 	lru_put_end(b, rp);
 	rp->c_secure = rqstp->rq_secure;
 	rp->c_type = cachetype;
 	rp->c_state = RC_DONE;
-	spin_unlock(&cache_lock);
+	spin_unlock(&b->cache_lock);
 	return;
 }
 
@@ -610,7 +609,6 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
  */
 static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
 {
-	spin_lock(&cache_lock);
 	seq_printf(m, "max entries: %u\n", max_drc_entries);
 	seq_printf(m, "num entries: %u\n",
 			atomic_read(&num_drc_entries));
@@ -622,7 +620,6 @@ static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "payload misses: %u\n", payload_misses);
 	seq_printf(m, "longest chain len: %u\n", longest_chain);
 	seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
-	spin_unlock(&cache_lock);
 
 	return 0;
 }
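
[Editor's note, not part of the patch: for readers outside the kernel tree, here is a minimal
userspace sketch of the lock-striping pattern the patch applies to the DRC hash table: one
spinlock per bucket instead of a single global cache_lock, so requests that hash to different
buckets no longer contend on one lock. All names here (bucket, cache_entry, HASHSIZE,
bucket_for, ...) are illustrative, and POSIX pthread spinlocks stand in for the kernel's
spinlock_t.]

	#include <pthread.h>
	#include <stdlib.h>

	#define HASHSIZE 64

	struct cache_entry {
		struct cache_entry *next;
		unsigned int key;
	};

	struct bucket {
		pthread_spinlock_t lock;	/* protects only this bucket's chain */
		struct cache_entry *head;
	};

	static struct bucket table[HASHSIZE];

	/* Analogous to nfsd_reply_cache_init(): one lock initialized per bucket. */
	static void table_init(void)
	{
		for (int i = 0; i < HASHSIZE; i++) {
			pthread_spin_init(&table[i].lock, PTHREAD_PROCESS_PRIVATE);
			table[i].head = NULL;
		}
	}

	/* The bucket is computed before any lock is taken. */
	static struct bucket *bucket_for(unsigned int key)
	{
		return &table[key % HASHSIZE];
	}

	static void insert(unsigned int key)
	{
		struct bucket *b = bucket_for(key);
		struct cache_entry *e = malloc(sizeof(*e));

		if (!e)
			return;
		e->key = key;

		/* Only this bucket is locked; other buckets proceed in parallel. */
		pthread_spin_lock(&b->lock);
		e->next = b->head;
		b->head = e;
		pthread_spin_unlock(&b->lock);
	}

	static int contains(unsigned int key)
	{
		struct bucket *b = bucket_for(key);
		struct cache_entry *e;
		int found = 0;

		pthread_spin_lock(&b->lock);
		for (e = b->head; e; e = e->next) {
			if (e->key == key) {
				found = 1;
				break;
			}
		}
		pthread_spin_unlock(&b->lock);
		return found;
	}

	int main(void)
	{
		table_init();
		insert(42);
		return !contains(42);	/* exit 0 if the entry is found */
	}

[The patch has the same shape: nfsd_cache_lookup() and nfsd_cache_update() hash to a bucket
first and then take only b->cache_lock, while prune_cache_entries() tests list_empty() on a
bucket's LRU list without its lock and skips it entirely, so idle buckets cost nothing on a
pruning pass.]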