diff mbox

[3/3] mm: drop PAGE_CACHE_* and page_cache_{get,release} definition

Message ID 1458561998-126622-4-git-send-email-kirill.shutemov@linux.intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Kirill A. Shutemov March 21, 2016, 12:06 p.m. UTC
All users are gone. We can remove these macros.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/pagemap.h | 15 ---------------
 1 file changed, 15 deletions(-)

Comments

Michal Hocko March 21, 2016, 12:53 p.m. UTC | #1
On Mon 21-03-16 15:06:38, Kirill A. Shutemov wrote:
> All users gone. We can remove these macros.
> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

\o/

Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

> ---
>  include/linux/pagemap.h | 15 ---------------
>  1 file changed, 15 deletions(-)
> 
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index b3fc0370c14f..7e1ab155c67c 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
>  				(__force unsigned long)mask;
>  }
>  
> -/*
> - * The page cache can be done in larger chunks than
> - * one page, because it allows for more efficient
> - * throughput (it can then be mapped into user
> - * space in smaller chunks for same flexibility).
> - *
> - * Or rather, it _will_ be done in larger chunks.
> - */
> -#define PAGE_CACHE_SHIFT	PAGE_SHIFT
> -#define PAGE_CACHE_SIZE		PAGE_SIZE
> -#define PAGE_CACHE_MASK		PAGE_MASK
> -#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
> -
> -#define page_cache_get(page)		get_page(page)
> -#define page_cache_release(page)	put_page(page)
>  void release_pages(struct page **pages, int nr, bool cold);
>  
>  /*
> -- 
> 2.7.0
diff mbox

Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b3fc0370c14f..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@  static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 				(__force unsigned long)mask;
 }
 
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT	PAGE_SHIFT
-#define PAGE_CACHE_SIZE		PAGE_SIZE
-#define PAGE_CACHE_MASK		PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page)		get_page(page)
-#define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, bool cold);
 
 /*