Message ID | 20220809203105.26183-4-fmdefrancesco@gmail.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | hfsplus: Replace kmap() with kmap_local_page() | expand |
> On Aug 9, 2022, at 1:31 PM, Fabio M. De Francesco <fmdefrancesco@gmail.com> wrote: > > kmap() is being deprecated in favor of kmap_local_page(). > > There are two main problems with kmap(): (1) It comes with an overhead as > mapping space is restricted and protected by a global lock for > synchronization and (2) it also requires global TLB invalidation when the > kmap’s pool wraps and it might block when the mapping space is fully > utilized until a slot becomes available. > > With kmap_local_page() the mappings are per thread, CPU local, can take > page faults, and can be called from any context (including interrupts). > It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore, > the tasks can be preempted and, when they are scheduled to run again, the > kernel virtual addresses are restored and are still valid. > > Since its use in bitmap.c is safe everywhere, it should be preferred. > > Therefore, replace kmap() with kmap_local_page() in bitmap.c. > > Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with > HIGHMEM64GB enabled. > > Cc: Viacheslav Dubeyko <slava@dubeyko.com> > Suggested-by: Ira Weiny <ira.weiny@intel.com> > Reviewed-by: Ira Weiny <ira.weiny@intel.com> > Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com> > --- Looks good. Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com> Thanks, Slava. 
> fs/hfsplus/bitmap.c | 20 ++++++++++---------- > 1 file changed, 10 insertions(+), 10 deletions(-) > > diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c > index cebce0cfe340..bd8dcea85588 100644 > --- a/fs/hfsplus/bitmap.c > +++ b/fs/hfsplus/bitmap.c > @@ -39,7 +39,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, > start = size; > goto out; > } > - pptr = kmap(page); > + pptr = kmap_local_page(page); > curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; > i = offset % 32; > offset &= ~(PAGE_CACHE_BITS - 1); > @@ -74,7 +74,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, > } > curr++; > } > - kunmap(page); > + kunmap_local(pptr); > offset += PAGE_CACHE_BITS; > if (offset >= size) > break; > @@ -84,7 +84,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, > start = size; > goto out; > } > - curr = pptr = kmap(page); > + curr = pptr = kmap_local_page(page); > if ((size ^ offset) / PAGE_CACHE_BITS) > end = pptr + PAGE_CACHE_BITS / 32; > else > @@ -127,7 +127,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, > len -= 32; > } > set_page_dirty(page); > - kunmap(page); > + kunmap_local(pptr); > offset += PAGE_CACHE_BITS; > page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, > NULL); > @@ -135,7 +135,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, > start = size; > goto out; > } > - pptr = kmap(page); > + pptr = kmap_local_page(page); > curr = pptr; > end = pptr + PAGE_CACHE_BITS / 32; > } > @@ -151,7 +151,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, > done: > *curr = cpu_to_be32(n); > set_page_dirty(page); > - kunmap(page); > + kunmap_local(pptr); > *max = offset + (curr - pptr) * 32 + i - start; > sbi->free_blocks -= *max; > hfsplus_mark_mdb_dirty(sb); > @@ -185,7 +185,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) > page = read_mapping_page(mapping, pnr, NULL); > if (IS_ERR(page)) > goto kaboom; > - pptr = kmap(page); > + pptr = kmap_local_page(page);
> curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; > end = pptr + PAGE_CACHE_BITS / 32; > len = count; > @@ -215,11 +215,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) > if (!count) > break; > set_page_dirty(page); > - kunmap(page); > + kunmap_local(pptr); > page = read_mapping_page(mapping, ++pnr, NULL); > if (IS_ERR(page)) > goto kaboom; > - pptr = kmap(page); > + pptr = kmap_local_page(page); > curr = pptr; > end = pptr + PAGE_CACHE_BITS / 32; > } > @@ -231,7 +231,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) > } > out: > set_page_dirty(page); > - kunmap(page); > + kunmap_local(pptr); > sbi->free_blocks += len; > hfsplus_mark_mdb_dirty(sb); > mutex_unlock(&sbi->alloc_mutex); > -- > 2.37.1 >
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c index cebce0cfe340..bd8dcea85588 100644 --- a/fs/hfsplus/bitmap.c +++ b/fs/hfsplus/bitmap.c @@ -39,7 +39,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, start = size; goto out; } - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; i = offset % 32; offset &= ~(PAGE_CACHE_BITS - 1); @@ -74,7 +74,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, } curr++; } - kunmap(page); + kunmap_local(pptr); offset += PAGE_CACHE_BITS; if (offset >= size) break; @@ -84,7 +84,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, start = size; goto out; } - curr = pptr = kmap(page); + curr = pptr = kmap_local_page(page); if ((size ^ offset) / PAGE_CACHE_BITS) end = pptr + PAGE_CACHE_BITS / 32; else @@ -127,7 +127,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, len -= 32; } set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); offset += PAGE_CACHE_BITS; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); @@ -135,7 +135,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, start = size; goto out; } - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr; end = pptr + PAGE_CACHE_BITS / 32; } @@ -151,7 +151,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, done: *curr = cpu_to_be32(n); set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); *max = offset + (curr - pptr) * 32 + i - start; sbi->free_blocks -= *max; hfsplus_mark_mdb_dirty(sb); @@ -185,7 +185,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) page = read_mapping_page(mapping, pnr, NULL); if (IS_ERR(page)) goto kaboom; - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; end = pptr + PAGE_CACHE_BITS / 32; len = count; @@ -215,11 +215,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) if (!count) 
break; set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); page = read_mapping_page(mapping, ++pnr, NULL); if (IS_ERR(page)) goto kaboom; - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr; end = pptr + PAGE_CACHE_BITS / 32; } @@ -231,7 +231,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) } out: set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); sbi->free_blocks += len; hfsplus_mark_mdb_dirty(sb); mutex_unlock(&sbi->alloc_mutex);