[RFC,v3,for-6.8/block,04/17] mtd: block2mtd: use bdev apis

Message ID 20231221085712.1766333-5-yukuai1@huaweicloud.com (mailing list archive)
State New, archived
Series block: don't access bd_inode directly from other modules

Commit Message

Yu Kuai Dec. 21, 2023, 8:56 a.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

On the one hand, convert to using folios when reading the bdev inode; on
the other hand, avoid accessing bd_inode directly.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 drivers/mtd/devices/block2mtd.c | 81 +++++++++++++++------------------
 1 file changed, 36 insertions(+), 45 deletions(-)
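
For context: bdev_read_folio() used throughout this patch is not defined
here; it is one of the bdev helpers added earlier in this series. Assuming
it is a thin wrapper around read_mapping_folio() on the bdev's page cache
(the exact definition lives in an earlier patch), its shape is roughly:

	/* in block core, where touching bd_inode is still allowed */
	struct folio *bdev_read_folio(struct block_device *bdev, loff_t pos)
	{
		return read_mapping_folio(bdev->bd_inode->i_mapping,
					  pos >> PAGE_SHIFT, NULL);
	}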

Comments

Jan Kara Jan. 4, 2024, 11:28 a.m. UTC | #1
On Thu 21-12-23 16:56:59, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@huawei.com>
> 
> On the one hand, convert to using folios when reading the bdev inode; on
> the other hand, avoid accessing bd_inode directly.
> 
> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
...
> +		for (p = folio_address(folio); p < max; p++)
>  			if (*p != -1UL) {
> -				lock_page(page);
> -				memset(page_address(page), 0xff, PAGE_SIZE);
> -				set_page_dirty(page);
> -				unlock_page(page);
> -				balance_dirty_pages_ratelimited(mapping);
> +				folio_lock(folio);
> +				memset(folio_address(folio), 0xff,
> +				       folio_size(folio));
> +				folio_mark_dirty(folio);
> +				folio_unlock(folio);
> +				bdev_balance_dirty_pages_ratelimited(bdev);

Rather than creating this bdev_balance_dirty_pages_ratelimited() just for
MTD, perhaps we can have here (and in other functions):

				...
				mapping = folio_mapping(folio);
				folio_unlock(folio);
				if (mapping)
					balance_dirty_pages_ratelimited(mapping);

What do you think? When we are working with the folios, it seems rather
natural to use their mapping for dirty balancing.

								Honza
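
Spelled out against the erase loop in the patch below, the suggestion
would look roughly like this (a sketch, untested):

		for (p = folio_address(folio); p < max; p++)
			if (*p != -1UL) {
				struct address_space *mapping;

				folio_lock(folio);
				memset(folio_address(folio), 0xff,
				       folio_size(folio));
				folio_mark_dirty(folio);
				/* folio_mapping() is stable while the folio is locked */
				mapping = folio_mapping(folio);
				folio_unlock(folio);
				if (mapping)
					balance_dirty_pages_ratelimited(mapping);
				break;
			}

This ties dirty throttling to the mapping the folio actually belongs to,
so no bdev-specific wrapper is needed.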
Yu Kuai Jan. 4, 2024, 12:22 p.m. UTC | #2
Hi,

On 2024/01/04 19:28, Jan Kara wrote:
> On Thu 21-12-23 16:56:59, Yu Kuai wrote:
>> From: Yu Kuai <yukuai3@huawei.com>
>>
>> On the one hand, convert to using folios when reading the bdev inode; on
>> the other hand, avoid accessing bd_inode directly.
>>
>> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> ...
>> +		for (p = folio_address(folio); p < max; p++)
>>   			if (*p != -1UL) {
>> -				lock_page(page);
>> -				memset(page_address(page), 0xff, PAGE_SIZE);
>> -				set_page_dirty(page);
>> -				unlock_page(page);
>> -				balance_dirty_pages_ratelimited(mapping);
>> +				folio_lock(folio);
>> +				memset(folio_address(folio), 0xff,
>> +				       folio_size(folio));
>> +				folio_mark_dirty(folio);
>> +				folio_unlock(folio);
>> +				bdev_balance_dirty_pages_ratelimited(bdev);
> 
> Rather than creating this bdev_balance_dirty_pages_ratelimited() just for
> MTD, perhaps we can have here (and in other functions):
> 
> 				...
> 				mapping = folio_mapping(folio);
> 				folio_unlock(folio);
> 				if (mapping)
> 					balance_dirty_pages_ratelimited(mapping);
> 
> What do you think? When we are working with the folios, it seems rather
> natural to use their mapping for dirty balancing.

I think this is a great idea! And bdev_balance_dirty_pages_ratelimited()
can be removed as well.

Thanks,
Kuai

> 
> 								Honza
>
Christoph Hellwig Jan. 5, 2024, 6:10 a.m. UTC | #3
On Thu, Jan 04, 2024 at 12:28:55PM +0100, Jan Kara wrote:
> What do you think? When we are working with the folios, it seems rather
> natural to use their mapping for dirty balancing.

The real problem is that block2mtd pokes way too deep into block
internals.

I think the saviour here is Christian's series replacing the bdev handle
with a struct file, which will allow us to use the normal file write path
here and get rid of the entire layering violation.
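
To illustrate what that conversion buys: with the device opened as a
regular struct file (dev->bdev_file is a hypothetical field name), the
read-modify-write dance on page cache folios in _block2mtd_write() could
collapse into the normal write path, roughly:

	/* sketch only, assuming the bdev-handle-to-file conversion has landed */
	loff_t pos = to;
	ssize_t ret = kernel_write(dev->bdev_file, buf, len, &pos);

	if (ret < 0)
		return ret;
	if (retlen)
		*retlen = ret;
	return 0;

kernel_write() goes through the normal ->write_iter path, which handles
page dirtying and dirty throttling internally, so the driver no longer
needs to poke into the bdev's page cache at all.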
Yu Kuai Jan. 5, 2024, 10:31 a.m. UTC | #4
Hi,

On 2024/01/05 14:10, Christoph Hellwig wrote:
> On Thu, Jan 04, 2024 at 12:28:55PM +0100, Jan Kara wrote:
>> What do you think? When we are working with the folios, it seems rather
>> natural to use their mapping for dirty balancing.
> 
> The real problem is that block2mtd pokes way too deep into block
> internals.
> 
> I think the saviour here is Christian's series replacing the bdev handle
> with a struct file, which will allow us to use the normal file write path
> here and get rid of the entire layering violation.

Yes, it looks like lots of patches from this set are not needed anymore.
I'll stop sending v4 and just send the patches that are not related to
'bd_inode' separately.

Thanks,
Kuai

Patch

diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index aa44a23ec045..cf201bf73184 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -46,40 +46,34 @@  struct block2mtd_dev {
 /* Static info about the MTD, used in cleanup_module */
 static LIST_HEAD(blkmtd_device_list);
 
-
-static struct page *page_read(struct address_space *mapping, pgoff_t index)
-{
-	return read_mapping_page(mapping, index, NULL);
-}
-
 /* erase a specified part of the device */
 static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 {
-	struct address_space *mapping =
-				dev->bdev_handle->bdev->bd_inode->i_mapping;
-	struct page *page;
+	struct block_device *bdev = dev->bdev_handle->bdev;
+	struct folio *folio;
 	pgoff_t index = to >> PAGE_SHIFT;	// page index
 	int pages = len >> PAGE_SHIFT;
 	u_long *p;
 	u_long *max;
 
 	while (pages) {
-		page = page_read(mapping, index);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		folio = bdev_read_folio(bdev, index << PAGE_SHIFT);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 
-		max = page_address(page) + PAGE_SIZE;
-		for (p=page_address(page); p<max; p++)
+		max = folio_address(folio) + folio_size(folio);
+		for (p = folio_address(folio); p < max; p++)
 			if (*p != -1UL) {
-				lock_page(page);
-				memset(page_address(page), 0xff, PAGE_SIZE);
-				set_page_dirty(page);
-				unlock_page(page);
-				balance_dirty_pages_ratelimited(mapping);
+				folio_lock(folio);
+				memset(folio_address(folio), 0xff,
+				       folio_size(folio));
+				folio_mark_dirty(folio);
+				folio_unlock(folio);
+				bdev_balance_dirty_pages_ratelimited(bdev);
 				break;
 			}
 
-		put_page(page);
+		folio_put(folio);
 		pages--;
 		index++;
 	}
@@ -106,9 +100,7 @@  static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 		size_t *retlen, u_char *buf)
 {
 	struct block2mtd_dev *dev = mtd->priv;
-	struct address_space *mapping =
-				dev->bdev_handle->bdev->bd_inode->i_mapping;
-	struct page *page;
+	struct folio *folio;
 	pgoff_t index = from >> PAGE_SHIFT;
 	int offset = from & (PAGE_SIZE-1);
 	int cpylen;
@@ -120,12 +112,13 @@  static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 			cpylen = len;	// this page
 		len = len - cpylen;
 
-		page = page_read(mapping, index);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		folio = bdev_read_folio(dev->bdev_handle->bdev,
+					index << PAGE_SHIFT);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 
-		memcpy(buf, page_address(page) + offset, cpylen);
-		put_page(page);
+		memcpy(buf, folio_address(folio) + offset, cpylen);
+		folio_put(folio);
 
 		if (retlen)
 			*retlen += cpylen;
@@ -141,9 +134,8 @@  static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
 		loff_t to, size_t len, size_t *retlen)
 {
-	struct page *page;
-	struct address_space *mapping =
-				dev->bdev_handle->bdev->bd_inode->i_mapping;
+	struct block_device *bdev = dev->bdev_handle->bdev;
+	struct folio *folio;
 	pgoff_t index = to >> PAGE_SHIFT;	// page index
 	int offset = to & ~PAGE_MASK;	// page offset
 	int cpylen;
@@ -155,18 +147,18 @@  static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
 			cpylen = len;			// this page
 		len = len - cpylen;
 
-		page = page_read(mapping, index);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		folio = bdev_read_folio(bdev, index << PAGE_SHIFT);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 
-		if (memcmp(page_address(page)+offset, buf, cpylen)) {
-			lock_page(page);
-			memcpy(page_address(page) + offset, buf, cpylen);
-			set_page_dirty(page);
-			unlock_page(page);
-			balance_dirty_pages_ratelimited(mapping);
+		if (memcmp(folio_address(folio) + offset, buf, cpylen)) {
+			folio_lock(folio);
+			memcpy(folio_address(folio) + offset, buf, cpylen);
+			folio_mark_dirty(folio);
+			folio_unlock(folio);
+			bdev_balance_dirty_pages_ratelimited(bdev);
 		}
-		put_page(page);
+		folio_put(folio);
 
 		if (retlen)
 			*retlen += cpylen;
@@ -211,8 +203,7 @@  static void block2mtd_free_device(struct block2mtd_dev *dev)
 	kfree(dev->mtd.name);
 
 	if (dev->bdev_handle) {
-		invalidate_mapping_pages(
-			dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
+		invalidate_bdev(dev->bdev_handle->bdev);
 		bdev_release(dev->bdev_handle);
 	}
 
@@ -295,7 +286,7 @@  static struct block2mtd_dev *add_device(char *devname, int erase_size,
 		goto err_free_block2mtd;
 	}
 
-	if ((long)bdev->bd_inode->i_size % erase_size) {
+	if (bdev_nr_bytes(bdev) % erase_size) {
 		pr_err("erasesize must be a divisor of device size\n");
 		goto err_free_block2mtd;
 	}
@@ -313,7 +304,7 @@  static struct block2mtd_dev *add_device(char *devname, int erase_size,
 
 	dev->mtd.name = name;
 
-	dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK;
+	dev->mtd.size = bdev_nr_bytes(bdev) & PAGE_MASK;
 	dev->mtd.erasesize = erase_size;
 	dev->mtd.writesize = 1;
 	dev->mtd.writebufsize = PAGE_SIZE;
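
Usage is unchanged by this patch; the erase-size check in add_device()
above still requires the erase size to divide the device size evenly.
For example (device path and size are illustrative):

	# 64 KiB erase blocks on a device whose size is a multiple of 65536
	modprobe block2mtd block2mtd=/dev/loop0,65536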