@@ -962,23 +962,33 @@ static int cache_release(struct inode *i
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
- struct cache_queue *cq;
+ struct cache_queue *cq, *tmp;
+ struct cache_request *cr;
+ struct list_head dequeued;
+
+ INIT_LIST_HEAD(&dequeued);
spin_lock(&queue_lock);
- list_for_each_entry(cq, &detail->queue, list)
+ list_for_each_entry_safe(cq, tmp, &detail->queue, list)
if (!cq->reader) {
- struct cache_request *cr = container_of(cq, struct cache_request, q);
+ cr = container_of(cq, struct cache_request, q);
if (cr->item != ch)
continue;
+ if (test_bit(CACHE_PENDING, &ch->flags))
+ /* Lost a race and it is pending again */
+ break;
if (cr->readers != 0)
continue;
- list_del(&cr->q.list);
- spin_unlock(&queue_lock);
- cache_put(cr->item, detail);
- kfree(cr->buf);
- kfree(cr);
- return;
+ list_move(&cr->q.list, &dequeued);
}
spin_unlock(&queue_lock);
+
+ while (!list_empty(&dequeued)) {
+ cr = list_entry(dequeued.next, struct cache_request, q.list);
+ list_del(&cr->q.list);
+ cache_put(cr->item, detail);
+ kfree(cr->buf);
+ kfree(cr);
+ }
}
/*
@@ -1081,6 +1091,7 @@ int sunrpc_cache_pipe_upcall(struct cach
struct cache_request *crq;
char *bp;
int len;
+ int ret = 0;
if (atomic_read(&detail->readers) == 0 &&
detail->last_close < get_seconds() - 30) {
@@ -1113,10 +1124,18 @@ int sunrpc_cache_pipe_upcall(struct cach
crq->len = PAGE_SIZE - len;
crq->readers = 0;
spin_lock(&queue_lock);
- list_add_tail(&crq->q.list, &detail->queue);
+ if (test_bit(CACHE_PENDING, &h->flags))
+ list_add_tail(&crq->q.list, &detail->queue);
+ else
+ /* Lost a race, no longer PENDING, so don't enqueue */
+ ret = -EAGAIN;
spin_unlock(&queue_lock);
wake_up(&queue_wait);
- return 0;
+ if (ret == -EAGAIN) {
+ kfree(buf);
+ kfree(crq);
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
############################
@@ -233,19 +233,16 @@ int cache_check(struct cache_detail *det
if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
switch (cache_make_upcall(detail, h)) {
case -EINVAL:
- clear_bit(CACHE_PENDING, &h->flags);
- cache_revisit_request(h);
- if (rv == -EAGAIN) {
+ write_lock(&detail->hash_lock);
+ if (rv) {
set_bit(CACHE_NEGATIVE, &h->flags);
cache_fresh_locked(h, monotonic_seconds()+CACHE_NEW_EXPIRY);
- cache_fresh_unlocked(h, detail);
rv = -ENOENT;
}
- break;
-
+ write_unlock(&detail->hash_lock);
+ /* FALLTHROUGH */
case -EAGAIN:
- clear_bit(CACHE_PENDING, &h->flags);
- cache_revisit_request(h);
+ cache_fresh_unlocked(h, detail);
break;
}
}
@@ -405,17 +402,12 @@ static int cache_clean(void)
&& ch->last_refresh >= current_detail->flush_time
)
continue;
- if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
- cache_dequeue(current_detail, ch);
- if (atomic_read(&ch->ref.refcount) == 1)
- break;
- }
- if (ch) {
*cp = ch->next;
ch->next = NULL;
current_detail->entries--;
rv = 1;
+ break;
}
 	write_unlock(&current_detail->hash_lock);
d = current_detail;
@@ -423,7 +415,7 @@ static int cache_clean(void)
current_index ++;
spin_unlock(&cache_list_lock);
if (ch) {
- cache_revisit_request(ch);
+ cache_fresh_unlocked(ch, d);
cache_put(ch, d);
}
} else