===================================================================
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -145,7 +145,7 @@ struct dm_buffer {
unsigned char list_mode; /* LIST_* */
blk_status_t read_error;
blk_status_t write_error;
- unsigned accessed;
+ volatile unsigned accessed;
unsigned hold_count;
unsigned long state;
unsigned long last_accessed;
@@ -193,6 +193,18 @@ static void dm_bufio_unlock(struct dm_bu
mutex_unlock(&c->lock);
}
+void dm_bufio_lock_read(struct dm_bufio_client *c)
+{
+ mutex_lock(&c->lock);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_lock_read);
+
+void dm_bufio_unlock_read(struct dm_bufio_client *c)
+{
+ mutex_unlock(&c->lock);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_unlock_read);
+
/*----------------------------------------------------------------*/
/*
@@ -870,7 +882,8 @@ enum new_flag {
NF_FRESH = 0,
NF_READ = 1,
NF_GET = 2,
- NF_PREFETCH = 3
+ NF_GET_UNLOCKED = 3,
+ NF_PREFETCH = 4
};
/*
@@ -1013,7 +1026,7 @@ static struct dm_buffer *__bufio_new(str
if (b)
goto found_buffer;
- if (nf == NF_GET)
+ if (nf == NF_GET || nf == NF_GET_UNLOCKED)
return NULL;
new_b = __alloc_buffer_wait(c, nf);
@@ -1058,12 +1071,17 @@ found_buffer:
* If the user called both dm_bufio_prefetch and dm_bufio_get on
* the same buffer, it would deadlock if we waited.
*/
- if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+ if ((nf == NF_GET || nf == NF_GET_UNLOCKED) && unlikely(test_bit(B_READING, &b->state)))
return NULL;
- b->hold_count++;
- __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
- test_bit(B_WRITING, &b->state));
+ if (nf == NF_GET_UNLOCKED) {
+ if (!b->accessed)
+ b->accessed = 1;
+ } else {
+ b->hold_count++;
+ __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
+ test_bit(B_WRITING, &b->state));
+ }
return b;
}
@@ -1154,6 +1172,19 @@ void *dm_bufio_new(struct dm_bufio_clien
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
+void *dm_bufio_get_unlocked(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp)
+{
+ int need_submit;
+ struct dm_buffer *b = __bufio_new(c, block, NF_GET_UNLOCKED, &need_submit, NULL);
+ if (b) {
+ *bp = b;
+ return b->data;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_unlocked);
+
void dm_bufio_prefetch(struct dm_bufio_client *c,
sector_t block, unsigned n_blocks)
{
===================================================================
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -38,6 +38,13 @@ dm_bufio_client_create(struct block_devi
void dm_bufio_client_destroy(struct dm_bufio_client *c);
/*
+ * Lock and unlock the bufio client - this is needed if we want to call
+ * dm_bufio_get_unlocked.
+ */
+void dm_bufio_lock_read(struct dm_bufio_client *c);
+void dm_bufio_unlock_read(struct dm_bufio_client *c);
+
+/*
* Set the sector range.
* When this function is called, there must be no I/O in progress on the bufio
* client.
@@ -76,6 +83,14 @@ void *dm_bufio_new(struct dm_bufio_clien
struct dm_buffer **bp);
/*
+ * Like dm_bufio_get, but assumes that the client is already locked with
+ * dm_bufio_lock_read/dm_bufio_unlock_read. dm_bufio_release should not be
+ * called; the caller should call dm_bufio_unlock_read to release the buffer.
+ */
+void *dm_bufio_get_unlocked(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
* Prefetch the specified blocks to the cache.
* The function starts to read the blocks and returns without waiting for
* I/O to finish.
In order to reduce locking overhead, we introduce a new API:
dm_bufio_lock_read, dm_bufio_unlock_read and dm_bufio_get_unlocked. The
user calls dm_bufio_lock_read, may then call dm_bufio_get_unlocked
several times, and finally calls dm_bufio_unlock_read. This is intended
to simplify btree walks in dm-thin.

A follow-up patch will change c->lock from a mutex to an rw-semaphore,
so that multiple processes can walk the btree concurrently.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
 drivers/md/dm-bufio.c    | 45 ++++++++++++++++++++++++++++++++++++++-------
 include/linux/dm-bufio.h | 15 +++++++++++++++
 2 files changed, 53 insertions(+), 7 deletions(-)
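For illustration, here is a minimal sketch of how a caller might batch
several lookups under the new API. This is not part of the patch:
walk_blocks() and process_node() are hypothetical names, and only the
dm_bufio_* calls come from this patch and the existing dm-bufio interface.

#include <linux/err.h>
#include <linux/dm-bufio.h>

/* Hypothetical consumer of block contents, for illustration only. */
static void process_node(void *data);

static void walk_blocks(struct dm_bufio_client *c,
			const sector_t *blocks, unsigned n)
{
	unsigned i;

	dm_bufio_lock_read(c);
	for (i = 0; i < n; i++) {
		struct dm_buffer *b;
		void *data = dm_bufio_get_unlocked(c, blocks[i], &b);

		if (data) {
			/*
			 * Cached hit: no hold_count was taken, so we must
			 * not call dm_bufio_release; dm_bufio_unlock_read
			 * is what releases the buffer.
			 */
			process_node(data);
			continue;
		}

		/*
		 * Miss (or read still in flight): drop the client lock
		 * and fall back to the ordinary locking API.
		 */
		dm_bufio_unlock_read(c);
		data = dm_bufio_read(c, blocks[i], &b);
		if (!IS_ERR(data)) {
			process_node(data);
			dm_bufio_release(b);
		}
		dm_bufio_lock_read(c);
	}
	dm_bufio_unlock_read(c);
}

Note that because NF_GET_UNLOCKED neither increments hold_count nor
relinks the buffer in the LRU, the unlocked path only marks the buffer
as accessed; the returned data is only guaranteed to stay valid while
the client lock is held.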