[v14,054/138] mm: Add kmap_local_folio()

Message ID: 20210715033704.692967-55-willy@infradead.org (mailing list archive)
State: New, archived
Series: Memory folios

Commit Message

Matthew Wilcox July 15, 2021, 3:35 a.m. UTC
This allows us to map a portion of a folio.  Callers can only expect
to access up to the next page boundary.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/highmem-internal.h | 11 +++++++++
 include/linux/highmem.h          | 38 ++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+)
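
For context, a minimal usage sketch (not part of this patch; the helper
name and copy-length handling are illustrative only) of a caller that
maps a folio and respects the page-boundary limit described above:

static void copy_from_folio_chunk(struct folio *folio, size_t offset,
				  char *dst, size_t len)
{
	/* Map the page of @folio that contains @offset. */
	char *addr = kmap_local_folio(folio, offset);
	/* The returned address is only valid up to the next page boundary. */
	size_t max = PAGE_SIZE - offset % PAGE_SIZE;

	memcpy(dst, addr, min(len, max));
	kunmap_local(addr);
}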

Comments

Mike Rapoport July 21, 2021, 9:58 a.m. UTC | #1
On Thu, Jul 15, 2021 at 04:35:40AM +0100, Matthew Wilcox (Oracle) wrote:
> This allows us to map a portion of a folio.  Callers can only expect
> to access up to the next page boundary.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> ---
>  include/linux/highmem-internal.h | 11 +++++++++
>  include/linux/highmem.h          | 38 ++++++++++++++++++++++++++++++++
>  2 files changed, 49 insertions(+)
> 
> diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
> index 7902c7d8b55f..d5d6f930ae1d 100644
> --- a/include/linux/highmem-internal.h
> +++ b/include/linux/highmem-internal.h
> @@ -73,6 +73,12 @@ static inline void *kmap_local_page(struct page *page)
>  	return __kmap_local_page_prot(page, kmap_prot);
>  }
>  
> +static inline void *kmap_local_folio(struct folio *folio, size_t offset)
> +{
> +	struct page *page = folio_page(folio, offset / PAGE_SIZE);
> +	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
> +}
> +
>  static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
>  {
>  	return __kmap_local_page_prot(page, prot);
> @@ -160,6 +166,11 @@ static inline void *kmap_local_page(struct page *page)
>  	return page_address(page);
>  }
>  
> +static inline void *kmap_local_folio(struct folio *folio, size_t offset)
> +{
> +	return page_address(&folio->page) + offset;
> +}
> +
>  static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
>  {
>  	return kmap_local_page(page);
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 8c6e8e996c87..85de3bd0b47d 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -96,6 +96,44 @@ static inline void kmap_flush_unused(void);
>   */
>  static inline void *kmap_local_page(struct page *page);
>  
> +/**
> + * kmap_local_folio - Map a page in this folio for temporary usage
> + * @folio:	The folio to be mapped.
> + * @offset:	The byte offset within the folio.
> + *
> + * Returns: The virtual address of the mapping
> + *
> + * Can be invoked from any context.

Context: Can be invoked from any context.

> + *
> + * Requires careful handling when nesting multiple mappings because the map
> + * management is stack based. The unmap has to be in the reverse order of
> + * the map operation:
> + *
> + * addr1 = kmap_local_folio(page1, offset1);
> + * addr2 = kmap_local_folio(page2, offset2);

Please s/page/folio/g here and in the description below

> + * ...
> + * kunmap_local(addr2);
> + * kunmap_local(addr1);
> + *
> + * Unmapping addr1 before addr2 is invalid and causes malfunction.
> + *
> + * Contrary to kmap() mappings the mapping is only valid in the context of
> + * the caller and cannot be handed to other contexts.
> + *
> + * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
> + * virtual address of the direct mapping. Only real highmem pages are
> + * temporarily mapped.
> + *
> + * While it is significantly faster than kmap() for the highmem case it
> + * comes with restrictions about the pointer validity. Only use when really
> + * necessary.
> + *
> + * On HIGHMEM enabled systems mapping a highmem page has the side effect of
> + * disabling migration in order to keep the virtual address stable across
> + * preemption. No caller of kmap_local_folio() can rely on this side effect.
> + */
> +static inline void *kmap_local_folio(struct folio *folio, size_t offset);
> +
>  /**
>   * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
>   * @page:	Pointer to the page to be mapped
> -- 
> 2.30.2
> 
>
Matthew Wilcox July 21, 2021, 2:12 p.m. UTC | #2
On Wed, Jul 21, 2021 at 12:58:24PM +0300, Mike Rapoport wrote:
> > +/**
> > + * kmap_local_folio - Map a page in this folio for temporary usage
> > + * @folio:	The folio to be mapped.
> > + * @offset:	The byte offset within the folio.
> > + *
> > + * Returns: The virtual address of the mapping
> > + *
> > + * Can be invoked from any context.
> 
> Context: Can be invoked from any context.
> 
> > + *
> > + * Requires careful handling when nesting multiple mappings because the map
> > + * management is stack based. The unmap has to be in the reverse order of
> > + * the map operation:
> > + *
> > + * addr1 = kmap_local_folio(page1, offset1);
> > + * addr2 = kmap_local_folio(page2, offset2);
> 
> Please s/page/folio/g here and in the description below
> 
> > + * ...
> > + * kunmap_local(addr2);
> > + * kunmap_local(addr1);
> > + *
> > + * Unmapping addr1 before addr2 is invalid and causes malfunction.
> > + *
> > + * Contrary to kmap() mappings the mapping is only valid in the context of
> > + * the caller and cannot be handed to other contexts.
> > + *
> > + * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
> > + * virtual address of the direct mapping. Only real highmem pages are
> > + * temporarily mapped.
> > + *
> > + * While it is significantly faster than kmap() for the highmem case it
> > + * comes with restrictions about the pointer validity. Only use when really
> > + * necessary.
> > + *
> > + * On HIGHMEM enabled systems mapping a highmem page has the side effect of
> > + * disabling migration in order to keep the virtual address stable across
> > + * preemption. No caller of kmap_local_folio() can rely on this side effect.
> > + */

kmap_local_folio() only maps one page from the folio.  So it's not
appropriate to s/page/folio/g.  I fiddled with the description a bit to
make this clearer:

 /**
  * kmap_local_folio - Map a page in this folio for temporary usage
- * @folio:     The folio to be mapped.
- * @offset:    The byte offset within the folio.
- *
- * Returns: The virtual address of the mapping
- *
- * Can be invoked from any context.
+ * @folio: The folio containing the page.
+ * @offset: The byte offset within the folio which identifies the page.
  *
  * Requires careful handling when nesting multiple mappings because the map
  * management is stack based. The unmap has to be in the reverse order of
  * the map operation:
  *
- * addr1 = kmap_local_folio(page1, offset1);
- * addr2 = kmap_local_folio(page2, offset2);
+ * addr1 = kmap_local_folio(folio1, offset1);
+ * addr2 = kmap_local_folio(folio2, offset2);
  * ...
  * kunmap_local(addr2);
  * kunmap_local(addr1);
@@ -131,6 +127,9 @@ static inline void *kmap_local_page(struct page *page);
  * On HIGHMEM enabled systems mapping a highmem page has the side effect of
  * disabling migration in order to keep the virtual address stable across
  * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ *
+ * Context: Can be invoked from any context.
+ * Return: The virtual address of @offset.
  */
 static inline void *kmap_local_folio(struct folio *folio, size_t offset);
Mike Rapoport July 21, 2021, 2:22 p.m. UTC | #3
On Wed, Jul 21, 2021 at 03:12:03PM +0100, Matthew Wilcox wrote:
> On Wed, Jul 21, 2021 at 12:58:24PM +0300, Mike Rapoport wrote:
> > > +/**
> > > + * kmap_local_folio - Map a page in this folio for temporary usage
> > > + * @folio:	The folio to be mapped.
> > > + * @offset:	The byte offset within the folio.
> > > + *
> > > + * Returns: The virtual address of the mapping
> > > + *
> > > + * Can be invoked from any context.
> > 
> > Context: Can be invoked from any context.
> > 
> > > + *
> > > + * Requires careful handling when nesting multiple mappings because the map
> > > + * management is stack based. The unmap has to be in the reverse order of
> > > + * the map operation:
> > > + *
> > > + * addr1 = kmap_local_folio(page1, offset1);
> > > + * addr2 = kmap_local_folio(page2, offset2);
> > 
> > Please s/page/folio/g here and in the description below
> > 
> > > + * ...
> > > + * kunmap_local(addr2);
> > > + * kunmap_local(addr1);
> > > + *
> > > + * Unmapping addr1 before addr2 is invalid and causes malfunction.
> > > + *
> > > + * Contrary to kmap() mappings the mapping is only valid in the context of
> > > + * the caller and cannot be handed to other contexts.
> > > + *
> > > + * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
> > > + * virtual address of the direct mapping. Only real highmem pages are
> > > + * temporarily mapped.
> > > + *
> > > + * While it is significantly faster than kmap() for the highmem case it
> > > + * comes with restrictions about the pointer validity. Only use when really
> > > + * necessary.
> > > + *
> > > + * On HIGHMEM enabled systems mapping a highmem page has the side effect of
> > > + * disabling migration in order to keep the virtual address stable across
> > > + * preemption. No caller of kmap_local_folio() can rely on this side effect.
> > > + */
> 
> kmap_local_folio() only maps one page from the folio.  So it's not
> appropriate to s/page/folio/g.  I fiddled with the description a bit to
> make this clearer:
> 
>  /**
>   * kmap_local_folio - Map a page in this folio for temporary usage
> - * @folio:     The folio to be mapped.
> - * @offset:    The byte offset within the folio.
> - *
> - * Returns: The virtual address of the mapping
> - *
> - * Can be invoked from any context.
> + * @folio: The folio containing the page.
> + * @offset: The byte offset within the folio which identifies the page.
>   *
>   * Requires careful handling when nesting multiple mappings because the map
>   * management is stack based. The unmap has to be in the reverse order of
>   * the map operation:
>   *
> - * addr1 = kmap_local_folio(page1, offset1);
> - * addr2 = kmap_local_folio(page2, offset2);
> + * addr1 = kmap_local_folio(folio1, offset1);
> + * addr2 = kmap_local_folio(folio2, offset2);
>   * ...
>   * kunmap_local(addr2);
>   * kunmap_local(addr1);
> @@ -131,6 +127,9 @@ static inline void *kmap_local_page(struct page *page);
>   * On HIGHMEM enabled systems mapping a highmem page has the side effect of
>   * disabling migration in order to keep the virtual address stable across
>   * preemption. No caller of kmap_local_folio() can rely on this side effect.
> + *
> + * Context: Can be invoked from any context.
> + * Return: The virtual address of @offset.
>   */
> >  static inline void *kmap_local_folio(struct folio *folio, size_t offset);

This is clearer, thanks! 

Maybe just add "page" to the Return: description:

* Return: The virtual address of page @offset.
Matthew Wilcox July 21, 2021, 3:02 p.m. UTC | #4
On Wed, Jul 21, 2021 at 05:22:16PM +0300, Mike Rapoport wrote:
> On Wed, Jul 21, 2021 at 03:12:03PM +0100, Matthew Wilcox wrote:
> > On Wed, Jul 21, 2021 at 12:58:24PM +0300, Mike Rapoport wrote:
> > > > +/**
> > > > + * kmap_local_folio - Map a page in this folio for temporary usage
> > > > + * @folio:	The folio to be mapped.
> > > > + * @offset:	The byte offset within the folio.
> > > > + *
> > > > + * Returns: The virtual address of the mapping
> > > > + *
> > > > + * Can be invoked from any context.
> > > 
> > > Context: Can be invoked from any context.
> > > 
> > > > + *
> > > > + * Requires careful handling when nesting multiple mappings because the map
> > > > + * management is stack based. The unmap has to be in the reverse order of
> > > > + * the map operation:
> > > > + *
> > > > + * addr1 = kmap_local_folio(page1, offset1);
> > > > + * addr2 = kmap_local_folio(page2, offset2);
> > > 
> > > Please s/page/folio/g here and in the description below
> > > 
> > > > + * ...
> > > > + * kunmap_local(addr2);
> > > > + * kunmap_local(addr1);
> > > > + *
> > > > + * Unmapping addr1 before addr2 is invalid and causes malfunction.
> > > > + *
> > > > + * Contrary to kmap() mappings the mapping is only valid in the context of
> > > > + * the caller and cannot be handed to other contexts.
> > > > + *
> > > > + * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
> > > > + * virtual address of the direct mapping. Only real highmem pages are
> > > > + * temporarily mapped.
> > > > + *
> > > > + * While it is significantly faster than kmap() for the highmem case it
> > > > + * comes with restrictions about the pointer validity. Only use when really
> > > > + * necessary.
> > > > + *
> > > > + * On HIGHMEM enabled systems mapping a highmem page has the side effect of
> > > > + * disabling migration in order to keep the virtual address stable across
> > > > + * preemption. No caller of kmap_local_folio() can rely on this side effect.
> > > > + */
> > 
> > kmap_local_folio() only maps one page from the folio.  So it's not
> > appropriate to s/page/folio/g.  I fiddled with the description a bit to
> > make this clearer:
> > 
> >  /**
> >   * kmap_local_folio - Map a page in this folio for temporary usage
> > - * @folio:     The folio to be mapped.
> > - * @offset:    The byte offset within the folio.
> > - *
> > - * Returns: The virtual address of the mapping
> > - *
> > - * Can be invoked from any context.
> > + * @folio: The folio containing the page.
> > + * @offset: The byte offset within the folio which identifies the page.
> >   *
> >   * Requires careful handling when nesting multiple mappings because the map
> >   * management is stack based. The unmap has to be in the reverse order of
> >   * the map operation:
> >   *
> > - * addr1 = kmap_local_folio(page1, offset1);
> > - * addr2 = kmap_local_folio(page2, offset2);
> > + * addr1 = kmap_local_folio(folio1, offset1);
> > + * addr2 = kmap_local_folio(folio2, offset2);
> >   * ...
> >   * kunmap_local(addr2);
> >   * kunmap_local(addr1);
> > @@ -131,6 +127,9 @@ static inline void *kmap_local_page(struct page *page);
> >   * On HIGHMEM enabled systems mapping a highmem page has the side effect of
> >   * disabling migration in order to keep the virtual address stable across
> >   * preemption. No caller of kmap_local_folio() can rely on this side effect.
> > + *
> > + * Context: Can be invoked from any context.
> > + * Return: The virtual address of @offset.
> >   */
> >  static inline void *kmap_local_folio(struct folio *folio, size_t offset);
> 
> This is clearer, thanks! 
> 
> Maybe just add page to Return: description:
> 
> * Return: The virtual address of page @offset.

No, it really does return the virtual address of @offset.  If you ask
for offset 0x1234 within a (sufficiently large) folio, it will map the
second page of that folio and return the address of the 0x234'th byte
within it.
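
Worked example, assuming PAGE_SIZE == 4096 (0x1000):

	offset = 0x1234;
	offset / PAGE_SIZE == 1;	/* folio_page(folio, 1): the second page */
	offset % PAGE_SIZE == 0x234;	/* byte 0x234 within that page */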
Vlastimil Babka Aug. 11, 2021, 2:17 p.m. UTC | #5
On 7/15/21 5:35 AM, Matthew Wilcox (Oracle) wrote:
> This allows us to map a portion of a folio.  Callers can only expect
> to access up to the next page boundary.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>

Acked-by: Vlastimil Babka <vbabka@suse.cz>
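
As an illustration of the one-page-at-a-time behaviour discussed above, a
hedged sketch of copying a whole folio page by page (the helper name is
made up; folio_size() is introduced elsewhere in this series):

static void folio_copy_out(struct folio *folio, char *dst)
{
	size_t offset, size = folio_size(folio);

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		/* Each call maps exactly one page of the folio. */
		char *addr = kmap_local_folio(folio, offset);

		memcpy(dst + offset, addr, PAGE_SIZE);
		kunmap_local(addr);
	}
}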

Patch

diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 7902c7d8b55f..d5d6f930ae1d 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -73,6 +73,12 @@ static inline void *kmap_local_page(struct page *page)
 	return __kmap_local_page_prot(page, kmap_prot);
 }
 
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+	struct page *page = folio_page(folio, offset / PAGE_SIZE);
+	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
+}
+
 static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
 {
 	return __kmap_local_page_prot(page, prot);
@@ -160,6 +166,11 @@ static inline void *kmap_local_page(struct page *page)
 	return page_address(page);
 }
 
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+	return page_address(&folio->page) + offset;
+}
+
 static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
 {
 	return kmap_local_page(page);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 8c6e8e996c87..85de3bd0b47d 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -96,6 +96,44 @@ static inline void kmap_flush_unused(void);
  */
 static inline void *kmap_local_page(struct page *page);
 
+/**
+ * kmap_local_folio - Map a page in this folio for temporary usage
+ * @folio:	The folio to be mapped.
+ * @offset:	The byte offset within the folio.
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_local_folio(page1, offset1);
+ * addr2 = kmap_local_folio(page2, offset2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity. Only use when really
+ * necessary.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ */
+static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+
 /**
  * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
  * @page:	Pointer to the page to be mapped