
[2/2] filemap: Remove add_to_page_cache() and add_to_page_cache_locked()

Message ID 20220601192333.1560777-2-willy@infradead.org (mailing list archive)
State New
Series [1/2] hugetlb: Convert huge_add_to_page_cache() to use a folio

Commit Message

Matthew Wilcox June 1, 2022, 7:23 p.m. UTC
These functions have no more users, so delete them.
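For reference, the folio-based replacement that the remaining hunks point to
is filemap_add_folio().  A minimal sketch of how a hypothetical leftover
caller would convert, assuming it wants the usual lock-and-LRU behaviour;
note that filemap_add_folio() locks the folio and puts it on the LRU itself,
whereas add_to_page_cache() left the LRU to the caller:

	/* Before: page-based; caller added the page to the LRU separately */
	error = add_to_page_cache(page, mapping, index, gfp);

	/* After: folio-based; locks the folio and puts it on the LRU */
	error = filemap_add_folio(mapping, page_folio(page), index, gfp);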

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 .../admin-guide/cgroup-v1/memcg_test.rst      |  2 +-
 include/linux/pagemap.h                       | 18 -----------------
 mm/filemap.c                                  | 20 -------------------
 mm/shmem.c                                    |  2 +-
 mm/swap_state.c                               |  2 +-
 5 files changed, 3 insertions(+), 41 deletions(-)

Comments

Mike Kravetz June 1, 2022, 8:56 p.m. UTC | #1
On 6/1/22 12:23, Matthew Wilcox (Oracle) wrote:
> These functions have no more users, so delete them.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  .../admin-guide/cgroup-v1/memcg_test.rst      |  2 +-
>  include/linux/pagemap.h                       | 18 -----------------
>  mm/filemap.c                                  | 20 -------------------
>  mm/shmem.c                                    |  2 +-
>  mm/swap_state.c                               |  2 +-
>  5 files changed, 3 insertions(+), 41 deletions(-)

A grep for remaining users comes up empty,

Acked-by: Mike Kravetz <mike.kravetz@oracle.com>

Muchun Song June 2, 2022, 2:49 a.m. UTC | #2
On Wed, Jun 01, 2022 at 08:23:33PM +0100, Matthew Wilcox (Oracle) wrote:
> These functions have no more users, so delete them.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Muchun Song <songmuchun@bytedance.com>

Thanks.

Patch

diff --git a/Documentation/admin-guide/cgroup-v1/memcg_test.rst b/Documentation/admin-guide/cgroup-v1/memcg_test.rst
index 45b94f7b3beb..a402359abb99 100644
--- a/Documentation/admin-guide/cgroup-v1/memcg_test.rst
+++ b/Documentation/admin-guide/cgroup-v1/memcg_test.rst
@@ -97,7 +97,7 @@  Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
 =============
 
 	Page Cache is charged at
-	- add_to_page_cache_locked().
+	- filemap_add_folio().
 
 	The logic is very clear. (About migration, see below)
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ce96866fbec4..5555689ea809 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1098,8 +1098,6 @@  size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
 size_t fault_in_readable(const char __user *uaddr, size_t size);
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t index, gfp_t gfp);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp);
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
@@ -1119,22 +1117,6 @@  bool filemap_release_folio(struct folio *folio, gfp_t gfp);
 loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
 		int whence);
 
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	__SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		__ClearPageLocked(page);
-	return error;
-}
-
 /* Must be non-static for BPF error injection */
 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
 		pgoff_t index, gfp_t gfp, void **shadowp);
diff --git a/mm/filemap.c b/mm/filemap.c
index 9daeaab36081..1e66eea98a7e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -929,26 +929,6 @@  noinline int __filemap_add_folio(struct address_space *mapping,
 }
 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 
-/**
- * add_to_page_cache_locked - add a locked page to the pagecache
- * @page:	page to add
- * @mapping:	the page's address_space
- * @offset:	page index
- * @gfp_mask:	page allocation mode
- *
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU.  The caller must do that.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t offset, gfp_t gfp_mask)
-{
-	return __filemap_add_folio(mapping, page_folio(page), offset,
-					  gfp_mask, NULL);
-}
-EXPORT_SYMBOL(add_to_page_cache_locked);
-
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 				pgoff_t index, gfp_t gfp)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index a6f565308133..60fdfc0208fd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -693,7 +693,7 @@  static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+ * Like filemap_add_folio, but error if expected item has gone.
  */
 static int shmem_add_to_page_cache(struct folio *folio,
 				   struct address_space *mapping,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 778d57d2d92d..f5b6f5638908 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -95,7 +95,7 @@  void *get_shadow_from_swap_cache(swp_entry_t entry)
 }
 
 /*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles filemap_add_folio on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
 int add_to_swap_cache(struct page *page, swp_entry_t entry,