@@ -18,6 +18,17 @@ void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
cache->max_size = max_size;
}
+static struct btrfs_lru_cache_entry *match_entry(struct list_head *head, u64 key)
+{
+ struct btrfs_lru_cache_entry *entry;
+
+ list_for_each_entry(entry, head, list)
+ if (entry->key == key)
+ return entry;
+
+ return NULL;
+}
+
/*
* Lookup for an entry in the cache.
*
@@ -29,15 +40,48 @@ void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
u64 key)
{
+ struct list_head *head;
struct btrfs_lru_cache_entry *entry;
- entry = mtree_load(&cache->entries, key);
+ head = mtree_load(&cache->entries, key);
+ if (!head)
+ return NULL;
+
+ entry = match_entry(head, key);
if (entry)
list_move_tail(&entry->lru_list, &cache->lru_list);
return entry;
}
+static void delete_entry(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *entry)
+{
+ struct list_head *prev = entry->list.prev;
+
+ ASSERT(cache->size > 0);
+ ASSERT(!mtree_empty(&cache->entries));
+
+ list_del(&entry->list);
+ list_del(&entry->lru_list);
+
+ if (list_empty(prev)) {
+ struct list_head *head;
+
+	/*
+	 * If the node preceding entry->list is now an empty list, it must
+	 * be the bucket's head entry, no longer linking any cached
+	 * entries, so erase it from the maple tree and free it.
+	 */
+ head = mtree_erase(&cache->entries, entry->key);
+ ASSERT(head == prev);
+ kfree(head);
+ }
+
+ kfree(entry);
+ cache->size--;
+}
+
/*
* Store an entry in the cache.
*
@@ -50,26 +94,39 @@ int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
struct btrfs_lru_cache_entry *new_entry,
gfp_t gfp)
{
+ const u64 key = new_entry->key;
+ struct list_head *head;
int ret;
+ head = kmalloc(sizeof(*head), gfp);
+ if (!head)
+ return -ENOMEM;
+
+ ret = mtree_insert(&cache->entries, key, head, gfp);
+ if (ret == 0) {
+ INIT_LIST_HEAD(head);
+ list_add_tail(&new_entry->list, head);
+ } else if (ret == -EEXIST) {
+ kfree(head);
+ head = mtree_load(&cache->entries, key);
+ ASSERT(head != NULL);
+ if (match_entry(head, key) != NULL)
+ return -EEXIST;
+ list_add_tail(&new_entry->list, head);
+ } else if (ret < 0) {
+ kfree(head);
+ return ret;
+ }
+
if (cache->size == cache->max_size) {
struct btrfs_lru_cache_entry *lru_entry;
- struct btrfs_lru_cache_entry *mt_entry;
lru_entry = list_first_entry(&cache->lru_list,
struct btrfs_lru_cache_entry,
lru_list);
- mt_entry = mtree_erase(&cache->entries, lru_entry->key);
- ASSERT(mt_entry == lru_entry);
- list_del(&mt_entry->lru_list);
- kfree(mt_entry);
- cache->size--;
+ delete_entry(cache, lru_entry);
}
- ret = mtree_insert(&cache->entries, new_entry->key, new_entry, gfp);
- if (ret < 0)
- return ret;
-
list_add_tail(&new_entry->lru_list, &cache->lru_list);
cache->size++;
@@ -89,9 +146,8 @@ void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache)
struct btrfs_lru_cache_entry *tmp;
list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list)
- kfree(entry);
+ delete_entry(cache, entry);
- INIT_LIST_HEAD(&cache->lru_list);
- mtree_destroy(&cache->entries);
- cache->size = 0;
+ ASSERT(cache->size == 0);
+ ASSERT(mtree_empty(&cache->entries));
}
@@ -17,6 +17,18 @@
struct btrfs_lru_cache_entry {
struct list_head lru_list;
u64 key;
+	/*
+	 * The maple tree uses unsigned long for keys, which is 32 bits on
+	 * 32-bit systems and 64 bits on 64-bit systems. So if we want to
+	 * use something like inode numbers as keys, which are always a u64,
+	 * we have to deal with this in a special way - we store the key in
+	 * the entry itself, as a u64, and the values inserted into the
+	 * maple tree are linked lists of entries - so in case we are on a
+	 * 64-bit system, that list always has a single entry, while on
+	 * 32-bit systems it may have more than one, with each entry having
+	 * the same value for their lower 32 bits of the u64 key.
+	 */
+ struct list_head list;
};
struct btrfs_lru_cache {