
[3/3] hfs: Replace kmap() with kmap_local_page() in btree.c

Message ID: 20220809152004.9223-4-fmdefrancesco@gmail.com (mailing list archive)
State: New, archived
Series: hfs: Replace kmap() with kmap_local_page()

Commit Message

Fabio M. De Francesco Aug. 9, 2022, 3:20 p.m. UTC
kmap() is being deprecated in favor of kmap_local_page().

There are two main problems with kmap(): (1) it comes with an overhead,
as the mapping space is restricted and protected by a global lock for
synchronization, and (2) it requires global TLB invalidation when the
kmap pool wraps, and it might block until a slot becomes available when
the mapping space is fully utilized.

With kmap_local_page() the mappings are per thread, CPU local, can take
page faults, and can be called from any context (including interrupts).
It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore,
tasks can be preempted and, when they are scheduled to run again, the
kernel virtual addresses are restored and still valid.
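
To make the pairing difference concrete, here is a minimal sketch
(illustrative only, not part of this patch; buf, off, and len are
placeholders):

	/* Before: kmap() takes a slot in the global pool and is
	 * paired with kunmap() on the page itself. */
	void *vaddr = kmap(page);
	memcpy(buf, vaddr + off, len);
	kunmap(page);

	/* After: kmap_local_page() creates a thread-local mapping
	 * and kunmap_local() is paired with the returned address.
	 * Nested local mappings must be released in reverse (LIFO)
	 * order. */
	void *vaddr = kmap_local_page(page);
	memcpy(buf, vaddr + off, len);
	kunmap_local(vaddr);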

Since the use of kmap_local_page() in btree.c is safe everywhere (each
mapping is created and released within the same function, and the mapped
addresses are never handed to other threads), it should be preferred.

Therefore, replace kmap() with kmap_local_page() in btree.c. Where
possible, use the suitable standard helpers (memzero_page(), memcpy_page())
instead of open coding kmap_local_page() plus memset() or memcpy().
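
For instance, an open-coded clear of a page range and its equivalent
with the standard helper (a sketch, using the same placeholder names as
above):

	/* Open coded: map, clear len bytes at off, unmap. */
	void *vaddr = kmap_local_page(page);
	memset(vaddr + off, 0, len);
	kunmap_local(vaddr);

	/* Equivalent, using the helper from include/linux/highmem.h. */
	memzero_page(page, off, len);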

Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
HIGHMEM64GB enabled.

Suggested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
---
 fs/hfs/btree.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

Comments

Viacheslav Dubeyko Aug. 9, 2022, 6:03 p.m. UTC | #1
> On Aug 9, 2022, at 8:20 AM, Fabio M. De Francesco <fmdefrancesco@gmail.com> wrote:
> 
> kmap() is being deprecated in favor of kmap_local_page().
> 
> There are two main problems with kmap(): (1) it comes with an overhead,
> as the mapping space is restricted and protected by a global lock for
> synchronization, and (2) it requires global TLB invalidation when the
> kmap pool wraps, and it might block until a slot becomes available when
> the mapping space is fully utilized.
> 
> With kmap_local_page() the mappings are per thread, CPU local, can take
> page faults, and can be called from any context (including interrupts).
> It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore,
> tasks can be preempted and, when they are scheduled to run again, the
> kernel virtual addresses are restored and still valid.
> 
> Since the use of kmap_local_page() in btree.c is safe everywhere (each
> mapping is created and released within the same function, and the mapped
> addresses are never handed to other threads), it should be preferred.
> 
> Therefore, replace kmap() with kmap_local_page() in btree.c. Where
> possible, use the suitable standard helpers (memzero_page(), memcpy_page())
> instead of open coding kmap_local_page() plus memset() or memcpy().
> 
> Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
> HIGHMEM64GB enabled.
> 
> Suggested-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
> ---


Looks good.

Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>

Thanks,
Slava.

Patch

diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 56c6782436e9..2fa4b1f8cc7f 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -80,7 +80,8 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 		goto free_inode;
 
 	/* Load the header */
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+					       sizeof(struct hfs_bnode_desc));
 	tree->root = be32_to_cpu(head->root);
 	tree->leaf_count = be32_to_cpu(head->leaf_count);
 	tree->leaf_head = be32_to_cpu(head->leaf_head);
@@ -119,12 +120,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	tree->node_size_shift = ffs(size) - 1;
 	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	kunmap(page);
+	kunmap_local(head);
 	put_page(page);
 	return tree;
 
 fail_page:
-	kunmap(page);
+	kunmap_local(head);
 	put_page(page);
 free_inode:
 	tree->inode->i_mapping->a_ops = &hfs_aops;
@@ -170,7 +171,8 @@ void hfs_btree_write(struct hfs_btree *tree)
 		return;
 	/* Load the header */
 	page = node->page[0];
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+					       sizeof(struct hfs_bnode_desc));
 
 	head->root = cpu_to_be32(tree->root);
 	head->leaf_count = cpu_to_be32(tree->leaf_count);
@@ -181,7 +183,7 @@ void hfs_btree_write(struct hfs_btree *tree)
 	head->attributes = cpu_to_be32(tree->attributes);
 	head->depth = cpu_to_be16(tree->depth);
 
-	kunmap(page);
+	kunmap_local(head);
 	set_page_dirty(page);
 	hfs_bnode_put(node);
 }
@@ -269,7 +271,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 
 	off += node->page_offset;
 	pagep = node->page + (off >> PAGE_SHIFT);
-	data = kmap(*pagep);
+	data = kmap_local_page(*pagep);
 	off &= ~PAGE_MASK;
 	idx = 0;
 
@@ -282,7 +284,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 						idx += i;
 						data[off] |= m;
 						set_page_dirty(*pagep);
-						kunmap(*pagep);
+						kunmap_local(data);
 						tree->free_nodes--;
 						mark_inode_dirty(tree->inode);
 						hfs_bnode_put(node);
@@ -291,14 +293,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 				}
 			}
 			if (++off >= PAGE_SIZE) {
-				kunmap(*pagep);
-				data = kmap(*++pagep);
+				kunmap_local(data);
+				data = kmap_local_page(*++pagep);
 				off = 0;
 			}
 			idx += 8;
 			len--;
 		}
-		kunmap(*pagep);
+		kunmap_local(data);
 		nidx = node->next;
 		if (!nidx) {
 			printk(KERN_DEBUG "create new bmap node...\n");
@@ -314,7 +316,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		off = off16;
 		off += node->page_offset;
 		pagep = node->page + (off >> PAGE_SHIFT);
-		data = kmap(*pagep);
+		data = kmap_local_page(*pagep);
 		off &= ~PAGE_MASK;
 	}
 }
@@ -361,20 +363,20 @@ void hfs_bmap_free(struct hfs_bnode *node)
 	}
 	off += node->page_offset + nidx / 8;
 	page = node->page[off >> PAGE_SHIFT];
-	data = kmap(page);
+	data = kmap_local_page(page);
 	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
 		pr_crit("trying to free free bnode %u(%d)\n",
 			node->this, node->type);
-		kunmap(page);
+		kunmap_local(data);
 		hfs_bnode_put(node);
 		return;
 	}
 	data[off] = byte & ~m;
 	set_page_dirty(page);
-	kunmap(page);
+	kunmap_local(data);
 	hfs_bnode_put(node);
 	tree->free_nodes++;
 	mark_inode_dirty(tree->inode);
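
Worth noting about the hfs_btree_open()/hfs_btree_write() hunks above:
head points sizeof(struct hfs_bnode_desc) bytes past the address returned
by kmap_local_page(), yet passing it to kunmap_local() is still correct,
since kunmap_local() accepts any address within the currently mapped
page. A minimal sketch of that pattern:

	void *vaddr = kmap_local_page(page);
	struct hfs_btree_header_rec *head = vaddr + sizeof(struct hfs_bnode_desc);
	/* ... read or update the header fields ... */
	kunmap_local(head);	/* any address within the mapping works */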