diff mbox series

[RFC,1/2] mm: remove duplicated flush_dcache_folio()

Message ID 20230216160528.2146188-2-fengwei.yin@intel.com (mailing list archive)
State New
Headers show
Series minor cleanup of usage of flush_dcache_folio() | expand

Commit Message

Yin Fengwei Feb. 16, 2023, 4:05 p.m. UTC
folio_zero_range() calls the flush_dcache_folio() already. Remove
unnecessary flush_dcache_folio() call.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 fs/libfs.c | 1 -
 mm/shmem.c | 7 +------
 2 files changed, 1 insertion(+), 7 deletions(-)

Comments

Ira Weiny Feb. 27, 2023, 5:46 a.m. UTC | #1
Yin Fengwei wrote:
> folio_zero_range() calls the flush_dcache_folio() already. Remove
> unnecessary flush_dcache_folio() call.

The change is probably reasonable but this statement is not exactly true.

The detail is that flush_dcache_page() is already called and another loop
through the folio pages is unneeded.  Not to mention hiding these flush
calls is nice because it is so hard to know when to use them.

> 
> Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
> ---
>  fs/libfs.c | 1 -
>  mm/shmem.c | 7 +------
>  2 files changed, 1 insertion(+), 7 deletions(-)
> 
> diff --git a/fs/libfs.c b/fs/libfs.c
> index 4eda519c3002..d57370c8e382 100644
> --- a/fs/libfs.c
> +++ b/fs/libfs.c
> @@ -543,7 +543,6 @@ EXPORT_SYMBOL(simple_setattr);
>  static int simple_read_folio(struct file *file, struct folio *folio)
>  {
>  	folio_zero_range(folio, 0, folio_size(folio));
> -	flush_dcache_folio(folio);
>  	folio_mark_uptodate(folio);
>  	folio_unlock(folio);
>  	return 0;
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 448f393d8ab2..66e50f0a15ab 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1401,7 +1401,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
>  				goto redirty;
>  		}
>  		folio_zero_range(folio, 0, folio_size(folio));
> -		flush_dcache_folio(folio);
>  		folio_mark_uptodate(folio);
>  	}
>  
> @@ -2010,11 +2009,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>  	 * it now, lest undo on failure cancel our earlier guarantee.
>  	 */
>  	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
> -		long i, n = folio_nr_pages(folio);
> -
> -		for (i = 0; i < n; i++)
> -			clear_highpage(folio_page(folio, i));
> -		flush_dcache_folio(folio);
> +		folio_zero_range(folio, 0, folio_size(folio));

This is a separate optimization from what your cover letter explained.

Ira
Yin Fengwei Feb. 27, 2023, 6:14 a.m. UTC | #2
Hi Ira,

On 2/27/2023 1:46 PM, Ira Weiny wrote:
> Yin Fengwei wrote:
>> folio_zero_range() calls the flush_dcache_folio() already. Remove
>> unnecessary flush_dcache_folio() call.
> 
> The change is probably reasonable but this statement is not exactly true.
> 
> The detail is that flush_dcache_page() is already called and another loop
> through the folio pages is unneeded.  Not to mention hiding these flush
> calls is nice because it is so hard to know when to use them.
Thanks again for checking the patch and sharing the comments.

Yes. This patch mainly focuses on removing the unneeded dcache flushes that I
noticed while checking the flush_dcache_folio()-related code.


Regards
Yin, Fengwei

> 
>>
>> Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
>> ---
>>  fs/libfs.c | 1 -
>>  mm/shmem.c | 7 +------
>>  2 files changed, 1 insertion(+), 7 deletions(-)
>>
>> diff --git a/fs/libfs.c b/fs/libfs.c
>> index 4eda519c3002..d57370c8e382 100644
>> --- a/fs/libfs.c
>> +++ b/fs/libfs.c
>> @@ -543,7 +543,6 @@ EXPORT_SYMBOL(simple_setattr);
>>  static int simple_read_folio(struct file *file, struct folio *folio)
>>  {
>>  	folio_zero_range(folio, 0, folio_size(folio));
>> -	flush_dcache_folio(folio);
>>  	folio_mark_uptodate(folio);
>>  	folio_unlock(folio);
>>  	return 0;
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 448f393d8ab2..66e50f0a15ab 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -1401,7 +1401,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
>>  				goto redirty;
>>  		}
>>  		folio_zero_range(folio, 0, folio_size(folio));
>> -		flush_dcache_folio(folio);
>>  		folio_mark_uptodate(folio);
>>  	}
>>  
>> @@ -2010,11 +2009,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>>  	 * it now, lest undo on failure cancel our earlier guarantee.
>>  	 */
>>  	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
>> -		long i, n = folio_nr_pages(folio);
>> -
>> -		for (i = 0; i < n; i++)
>> -			clear_highpage(folio_page(folio, i));
>> -		flush_dcache_folio(folio);
>> +		folio_zero_range(folio, 0, folio_size(folio));
> 
> This is a separate optimization from what your cover letter explained.
> 
> Ira
diff mbox series

Patch

diff --git a/fs/libfs.c b/fs/libfs.c
index 4eda519c3002..d57370c8e382 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -543,7 +543,6 @@  EXPORT_SYMBOL(simple_setattr);
 static int simple_read_folio(struct file *file, struct folio *folio)
 {
 	folio_zero_range(folio, 0, folio_size(folio));
-	flush_dcache_folio(folio);
 	folio_mark_uptodate(folio);
 	folio_unlock(folio);
 	return 0;
diff --git a/mm/shmem.c b/mm/shmem.c
index 448f393d8ab2..66e50f0a15ab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1401,7 +1401,6 @@  static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 				goto redirty;
 		}
 		folio_zero_range(folio, 0, folio_size(folio));
-		flush_dcache_folio(folio);
 		folio_mark_uptodate(folio);
 	}
 
@@ -2010,11 +2009,7 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	 * it now, lest undo on failure cancel our earlier guarantee.
 	 */
 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
-		long i, n = folio_nr_pages(folio);
-
-		for (i = 0; i < n; i++)
-			clear_highpage(folio_page(folio, i));
-		flush_dcache_folio(folio);
+		folio_zero_range(folio, 0, folio_size(folio));
 		folio_mark_uptodate(folio);
 	}