
[RFC,1/8] shmem: replace BLOCKS_PER_PAGE with PAGE_SECTORS

Message ID 20230421214400.2836131-2-mcgrof@kernel.org (mailing list archive)
State New
Series shmem: add support for blocksize > PAGE_SIZE

Commit Message

Luis Chamberlain April 21, 2023, 9:43 p.m. UTC
Instead of having our own macro, use the generic PAGE_SECTORS. It also
makes it clearer what we are trying to compute for inode->i_blocks. We
get the inode size as defined in __inode_get_bytes() by:

(inode->i_blocks << SECTOR_SHIFT) + inode->i_bytes

This produces no functional changes.

Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
---
 mm/shmem.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
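
For reference, a minimal userspace sketch of the arithmetic (assuming 4 KiB
pages and the kernel's SECTOR_SHIFT of 9; the PAGE_SHIFT value, the sample
page count and main() below are illustrative, not taken from the patch). It
shows that the removed BLOCKS_PER_PAGE and PAGE_SECTORS evaluate to the same
number, and how __inode_get_bytes()-style math turns i_blocks back into bytes:

/*
 * Userspace demo of the equivalence; mimics the kernel definitions,
 * it is not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages for the demo */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SECTOR_SHIFT	9
#define PAGE_SECTORS	(1UL << (PAGE_SHIFT - SECTOR_SHIFT))

#define BLOCKS_PER_PAGE	(PAGE_SIZE / 512)	/* the macro the patch removes */

int main(void)
{
	unsigned long pages = 3;			/* pages charged to the inode */
	unsigned long i_blocks = pages * PAGE_SECTORS;	/* what shmem stores now */

	/* Same value either way, hence no functional change. */
	assert(BLOCKS_PER_PAGE == PAGE_SECTORS);

	/* __inode_get_bytes()-style: (i_blocks << SECTOR_SHIFT) + i_bytes */
	printf("size = %lu bytes\n", (i_blocks << SECTOR_SHIFT) + 0);
	return 0;
}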

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index b5d102a2a766..5bf92d571092 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -86,7 +86,6 @@  static struct vfsmount *shm_mnt;
 
 #include "internal.h"
 
-#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
 
 /* Pretend that each entry is of this size in directory's i_size */
@@ -363,7 +362,7 @@  static void shmem_recalc_inode(struct inode *inode)
 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
 	if (freed > 0) {
 		info->alloced -= freed;
-		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
+		inode->i_blocks -= freed * PAGE_SECTORS;
 		shmem_inode_unacct_blocks(inode, freed);
 	}
 }
@@ -381,7 +380,7 @@  bool shmem_charge(struct inode *inode, long pages)
 
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced += pages;
-	inode->i_blocks += pages * BLOCKS_PER_PAGE;
+	inode->i_blocks += pages * PAGE_SECTORS;
 	shmem_recalc_inode(inode);
 	spin_unlock_irqrestore(&info->lock, flags);
 
@@ -397,7 +396,7 @@  void shmem_uncharge(struct inode *inode, long pages)
 
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced -= pages;
-	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
+	inode->i_blocks -= pages * PAGE_SECTORS;
 	shmem_recalc_inode(inode);
 	spin_unlock_irqrestore(&info->lock, flags);
 
@@ -2002,7 +2001,7 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 
 	spin_lock_irq(&info->lock);
 	info->alloced += folio_nr_pages(folio);
-	inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
+	inode->i_blocks += (blkcnt_t) PAGE_SECTORS << folio_order(folio);
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
 	alloced = true;
@@ -2659,7 +2658,7 @@  int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 
 	spin_lock_irq(&info->lock);
 	info->alloced++;
-	inode->i_blocks += BLOCKS_PER_PAGE;
+	inode->i_blocks += PAGE_SECTORS;
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);