@@ -13,6 +13,7 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
+#include <linux/bpf_mem_alloc.h>
#include <uapi/linux/btf.h>
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
@@ -55,6 +56,7 @@ struct bpf_local_storage_map {
u32 bucket_log;
u16 elem_size;
u16 cache_idx;
+ struct bpf_mem_alloc selem_ma;
};
struct bpf_local_storage_data {
@@ -74,11 +76,7 @@ struct bpf_local_storage_elem {
struct hlist_node snode; /* Linked to bpf_local_storage */
struct bpf_local_storage __rcu *local_storage;
struct rcu_head rcu;
- /* 8 bytes hole */
- /* The data is stored in another cacheline to minimize
- * the number of cachelines access during a cache hit.
- */
- struct bpf_local_storage_data sdata ____cacheline_aligned;
+ struct bpf_local_storage_data sdata;
};
struct bpf_local_storage {
@@ -80,12 +80,23 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
if (charge_mem && mem_charge(smap, owner, smap->elem_size))
return NULL;
- selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
- gfp_flags | __GFP_NOWARN);
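+ /* bpf_mem_cache_alloc_flags() works on a per-cpu cache, so
+ * disable migration to stay on one cpu during the alloc.
+ */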
+ migrate_disable();
+ selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
+ migrate_enable();
+
if (selem) {
if (value)
copy_map_value(&smap->map, SDATA(selem)->data, value);
- /* No need to call check_and_init_map_value as memory is zero init */
+ else
+ /* Keep the original bpf_map_kzalloc() zero-init behavior
+ * from before the switch to bpf_mem_cache_alloc.
+ *
+ * No need to use zero_map_value(). bpf_selem_free()
+ * only does bpf_mem_cache_free() when no other
+ * bpf prog is using the selem.
+ */
+ memset(SDATA(selem)->data, 0, smap->map.value_size);
+
return selem;
}
@@ -129,7 +140,7 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
struct bpf_local_storage_elem *selem;
selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
- kfree(selem);
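+ /* bpf_mem_cache_raw_free() returns the memory directly to
+ * the kernel allocator instead of the per-cpu cache.
+ */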
+ bpf_mem_cache_raw_free(selem);
}
static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
@@ -145,10 +156,17 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem,
bool reuse_now)
{
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
- if (!reuse_now)
+ if (!reuse_now) {
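+ /* Sleepable bpf progs may still be reading the selem under
+ * rcu_read_lock_trace(), so wait for a tasks-trace gp first.
+ */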
call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
- else
- call_rcu(&selem->rcu, bpf_selem_free_rcu);
+ } else {
+ /* Instead of using the vanilla call_rcu(),
+ * bpf_mem_cache_free() can reuse the selem
+ * immediately.
+ */
+ migrate_disable();
+ bpf_mem_cache_free(&smap->selem_ma, selem);
+ migrate_enable();
+ }
}
/* local_storage->lock must be held and selem->local_storage == local_storage.
@@ -661,6 +679,7 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
struct bpf_local_storage_map *smap;
unsigned int i;
u32 nbuckets;
+ int err;
smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
if (!smap)
@@ -675,8 +694,8 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
nbuckets, GFP_USER | __GFP_NOWARN);
if (!smap->buckets) {
- bpf_map_area_free(smap);
- return ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
+ goto free_smap;
}
for (i = 0; i < nbuckets; i++) {
@@ -687,8 +706,17 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
smap->elem_size = offsetof(struct bpf_local_storage_elem,
sdata.data[attr->value_size]);
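+ /* Fixed-size allocator for selems; the last arg selects
+ * the non-percpu variant of bpf_mem_alloc.
+ */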
+ err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
+ if (err)
+ goto free_smap;
+
smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
return &smap->map;
+
+free_smap:
+ kvfree(smap->buckets);
+ bpf_map_area_free(smap);
+ return ERR_PTR(err);
}
void bpf_local_storage_map_free(struct bpf_map *map,
@@ -754,6 +782,7 @@ void bpf_local_storage_map_free(struct bpf_map *map,
*/
synchronize_rcu();
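+ /* All selems of this map have been freed above, so the
+ * selem cache can be torn down together with the map.
+ */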
+ bpf_mem_alloc_destroy(&smap->selem_ma);
kvfree(smap->buckets);
bpf_map_area_free(smap);
}