diff mbox series

btrfs: qgroup: use kmem cache to alloc struct btrfs_qgroup_extent_record

Message ID 20240527101334.207228-1-sunjunchao2870@gmail.com (mailing list archive)
State New
Headers show
Series btrfs: qgroup: use kmem cache to alloc struct btrfs_qgroup_extent_record | expand

Commit Message

JunChao Sun May 27, 2024, 10:13 a.m. UTC
Fixes a todo in qgroup code by utilizing kmem cache to accelerate
the allocation of struct btrfs_qgroup_extent_record.

This patch has passed the check -g qgroup tests using xfstests.

Signed-off-by: Junchao Sun <sunjunchao2870@gmail.com>
---
 fs/btrfs/delayed-ref.c |  6 +++---
 fs/btrfs/qgroup.c      | 21 ++++++++++++++++++---
 fs/btrfs/qgroup.h      |  6 +++++-
 fs/btrfs/super.c       |  3 +++
 4 files changed, 29 insertions(+), 7 deletions(-)

Comments

David Sterba May 27, 2024, 3:27 p.m. UTC | #1
On Mon, May 27, 2024 at 06:13:34PM +0800, Junchao Sun wrote:
> Fixes a todo in qgroup code by utilizing kmem cache to accelerate
> the allocation of struct btrfs_qgroup_extent_record.

The TODO is almost 9 years old so it should be evaluated if it's
applicable.

> This patch has passed the check -g qgroup tests using xfstests.

Changing kmalloc to kmem_cache should be justified and explained why
it's done. I'm not sure we need it given that it's been working fine so
far. Also the quotas can be enabled or disabled during a single mount
it's not necessary to create the dedicated kmem cache and leave it
unused if quotas are disabled. Here using the generic slab is
convenient.

If you think there is a reason to use kmem cache please let us know.
Otherwise it would be better to delete the TODO line.
JunChao Sun May 28, 2024, 6:17 a.m. UTC | #2
David Sterba <dsterba@suse.cz> 于2024年5月27日周一 23:27写道:
>
> On Mon, May 27, 2024 at 06:13:34PM +0800, Junchao Sun wrote:
> > Fixes a todo in qgroup code by utilizing kmem cache to accelerate
> > the allocation of struct btrfs_qgroup_extent_record.
>
> The TODO is almost 9 years old so it should be evaluated if it's
> applicable.
>
> > This patch has passed the check -g qgroup tests using xfstests.
>
> Changing kmalloc to kmem_cache should be justified and explained why
> it's done. I'm not sure we need it given that it's been working fine so
> far. Also the quotas can be enabled or disabled during a single mount
> it's not necessary to create the dedicated kmem cache and leave it
> unused if quotas are disabled. Here using the generic slab is
> convenient.
It's reasonable. I will send a patch to delete the todo line.
>
> If you think there is a reason to use kmem cache please let us know.
> Otherwise it would be better to delete the TODO line.
diff mbox series

Patch

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e44e62cf76bc..d2d6bda6ccf7 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -916,7 +916,7 @@  add_delayed_ref_head(struct btrfs_trans_handle *trans,
 	if (qrecord) {
 		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
 					delayed_refs, qrecord))
-			kfree(qrecord);
+			kmem_cache_free(btrfs_qgroup_extent_record_cachep, qrecord);
 		else
 			qrecord_inserted = true;
 	}
@@ -1088,7 +1088,7 @@  int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	}
 
 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
-		record = kzalloc(sizeof(*record), GFP_NOFS);
+		record = kmem_cache_zalloc(btrfs_qgroup_extent_record_cachep, GFP_NOFS);
 		if (!record) {
 			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
@@ -1191,7 +1191,7 @@  int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 	}
 
 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
-		record = kzalloc(sizeof(*record), GFP_NOFS);
+		record = kmem_cache_zalloc(btrfs_qgroup_extent_record_cachep, GFP_NOFS);
 		if (!record) {
 			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 40e5f7f2fcb7..5f72909bfcf2 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -30,6 +30,7 @@ 
 #include "root-tree.h"
 #include "tree-checker.h"
 
+struct kmem_cache *btrfs_qgroup_extent_record_cachep;
 enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
 {
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
@@ -2024,7 +2025,7 @@  int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
 
 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
 		return 0;
-	record = kzalloc(sizeof(*record), GFP_NOFS);
+	record = kmem_cache_zalloc(btrfs_qgroup_extent_record_cachep, GFP_NOFS);
 	if (!record)
 		return -ENOMEM;
 
@@ -2985,7 +2986,7 @@  int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 		ulist_free(new_roots);
 		new_roots = NULL;
 		rb_erase(node, &delayed_refs->dirty_extent_root);
-		kfree(record);
+		kmem_cache_free(btrfs_qgroup_extent_record_cachep, record);
 
 	}
 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
@@ -4783,7 +4784,7 @@  void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
 	root = &trans->delayed_refs.dirty_extent_root;
 	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
 		ulist_free(entry->old_roots);
-		kfree(entry);
+		kmem_cache_free(btrfs_qgroup_extent_record_cachep, entry);
 	}
 	*root = RB_ROOT;
 }
@@ -4845,3 +4846,17 @@  int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
 	spin_unlock(&fs_info->qgroup_lock);
 	return ret;
 }
+
+void __cold btrfs_qgroup_exit(void)
+{
+	kmem_cache_destroy(btrfs_qgroup_extent_record_cachep);
+}
+
+int __init btrfs_qgroup_init(void)
+{
+	btrfs_qgroup_extent_record_cachep = KMEM_CACHE(btrfs_qgroup_extent_record, 0);
+	if (!btrfs_qgroup_extent_record_cachep)
+		return -ENOMEM;
+
+	return 0;
+}
\ No newline at end of file
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 706640be0ec2..3975c32ac23e 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -123,7 +123,6 @@  struct btrfs_inode;
 
 /*
  * Record a dirty extent, and info qgroup to update quota on it
- * TODO: Use kmem cache to alloc it.
  */
 struct btrfs_qgroup_extent_record {
 	struct rb_node node;
@@ -312,6 +311,11 @@  enum btrfs_qgroup_mode {
 	BTRFS_QGROUP_MODE_SIMPLE
 };
 
+extern struct kmem_cache *btrfs_qgroup_extent_record_cachep;
+
+void __cold btrfs_qgroup_exit(void);
+int __init btrfs_qgroup_init(void);
+
 enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info);
 bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info);
 bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 7e44ccaf348f..0fe383ef816b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2506,6 +2506,9 @@  static const struct init_sequence mod_init_seq[] = {
 	}, {
 		.init_func = btrfs_delayed_ref_init,
 		.exit_func = btrfs_delayed_ref_exit,
+	}, {
+		.init_func = btrfs_qgroup_init,
+		.exit_func = btrfs_qgroup_exit,
 	}, {
 		.init_func = btrfs_prelim_ref_init,
 		.exit_func = btrfs_prelim_ref_exit,