@@ -33,6 +33,9 @@ static unsigned int max_drc_entries; /* max number of entries */
static unsigned int drc_mem_usage; /* memory used by cache */
static unsigned int csum_misses; /* cache misses due only to
csum comparison failures */
+static unsigned int max_search_time; /* longest hash-chain search seen, in ns */
+static unsigned int avg_search_time; /* running mean of search time, in ns */
+
/*
 * Calculate the hash index from an XID.
 */
@@ -310,16 +313,28 @@ nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
/*
 * Walk the hash chain for this request's XID looking for a matching
 * cache entry. Returns the matching entry, or NULL if there is none.
 *
 * Also times each chain walk and updates the avg_search_time and
 * max_search_time stats (in nanoseconds) shown in the reply cache
 * stats file, since longer hash chains are a concern here.
 *
 * NOTE(review): presumably called with cache_lock held (the stats-show
 * path reads these counters under it) — confirm against callers.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep *rp, *ret = NULL;
	struct hlist_node *hn;
	struct hlist_head *rh;
	ktime_t start;
	unsigned int delta;
	/*
	 * Number of searches performed so far. Saturate at INT_MAX
	 * rather than letting it wrap: a wrap to 0 would make the
	 * divisor below zero, and values above INT_MAX would turn the
	 * (int) cast negative.
	 */
	static unsigned int nsearches;

	start = ktime_get();
	rh = &cache_hash[request_hash(rqstp->rq_xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	delta = (unsigned int)ktime_to_ns(ktime_sub(ktime_get(), start));

	/*
	 * Update the mean incrementally: avg += (delta - avg) / n.
	 * Unlike (avg * (n - 1) + delta) / n, this never forms a
	 * product that can overflow 32 bits once many searches have
	 * accumulated.
	 */
	if (nsearches < INT_MAX)
		++nsearches;
	avg_search_time += (int)(delta - avg_search_time) / (int)nsearches;
	max_search_time = max(max_search_time, delta);

	return ret;
}
/*
@@ -589,6 +604,8 @@ static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
seq_printf(m, "checksum misses: %u\n", csum_misses);
seq_printf(m, "max chain len: %u\n", nfsd_repcache_max_chain_len());
+ seq_printf(m, "avg search time: %uns\n", avg_search_time);
+ seq_printf(m, "max search time: %uns\n", max_search_time);
spin_unlock(&cache_lock);
return 0;
}
Since we're moderately concerned about the potential increase in hash chain lengths, keep a running average of the time taken to search the cache, along with the maximum search time seen. Signed-off-by: Jeff Layton <jlayton@redhat.com> --- fs/nfsd/nfscache.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-)