
[v5,03/10] swiotlb: Map the buffer if it was unmapped by XPFO

Message ID 20170809200755.11234-4-tycho@docker.com
State New, archived

Commit Message

Tycho Andersen Aug. 9, 2017, 8:07 p.m. UTC
From: Juerg Haefliger <juerg.haefliger@hpe.com>

Signed-off-by: Juerg Haefliger <juerg.haefliger@canonical.com>
Tested-by: Tycho Andersen <tycho@docker.com>
---
 include/linux/xpfo.h | 4 ++++
 lib/swiotlb.c        | 3 ++-
 mm/xpfo.c            | 9 +++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

Comments

Konrad Rzeszutek Wilk Aug. 10, 2017, 1:01 p.m. UTC | #1
On Wed, Aug 09, 2017 at 02:07:48PM -0600, Tycho Andersen wrote:
> From: Juerg Haefliger <juerg.haefliger@hpe.com>
> 
> Signed-off-by: Juerg Haefliger <juerg.haefliger@canonical.com>
> Tested-by: Tycho Andersen <tycho@docker.com>
> ---
>  include/linux/xpfo.h | 4 ++++
>  lib/swiotlb.c        | 3 ++-
>  mm/xpfo.c            | 9 +++++++++
>  3 files changed, 15 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/xpfo.h b/include/linux/xpfo.h
> index 1ff2d1976837..6b61f7b820f4 100644
> --- a/include/linux/xpfo.h
> +++ b/include/linux/xpfo.h
> @@ -27,6 +27,8 @@ void xpfo_kunmap(void *kaddr, struct page *page);
>  void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp);
>  void xpfo_free_pages(struct page *page, int order);
>  
> +bool xpfo_page_is_unmapped(struct page *page);
> +
>  #else /* !CONFIG_XPFO */
>  
>  static inline void xpfo_kmap(void *kaddr, struct page *page) { }
> @@ -34,6 +36,8 @@ static inline void xpfo_kunmap(void *kaddr, struct page *page) { }
>  static inline void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp) { }
>  static inline void xpfo_free_pages(struct page *page, int order) { }
>  
> +static inline bool xpfo_page_is_unmapped(struct page *page) { return false; }
> +
>  #endif /* CONFIG_XPFO */
>  
>  #endif /* _LINUX_XPFO_H */
> diff --git a/lib/swiotlb.c b/lib/swiotlb.c
> index a8d74a733a38..d4fee5ca2d9e 100644
> --- a/lib/swiotlb.c
> +++ b/lib/swiotlb.c
> @@ -420,8 +420,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
>  {
>  	unsigned long pfn = PFN_DOWN(orig_addr);
>  	unsigned char *vaddr = phys_to_virt(tlb_addr);
> +	struct page *page = pfn_to_page(pfn);
>  
> -	if (PageHighMem(pfn_to_page(pfn))) {
> +	if (PageHighMem(page) || xpfo_page_is_unmapped(page)) {
>  		/* The buffer does not have a mapping.  Map it in and copy */
>  		unsigned int offset = orig_addr & ~PAGE_MASK;
>  		char *buffer;
> diff --git a/mm/xpfo.c b/mm/xpfo.c
> index 3cd45f68b5ad..3f305f31a072 100644
> --- a/mm/xpfo.c
> +++ b/mm/xpfo.c
> @@ -206,3 +206,12 @@ void xpfo_kunmap(void *kaddr, struct page *page)
>  	spin_unlock_irqrestore(&xpfo->maplock, flags);
>  }
>  EXPORT_SYMBOL(xpfo_kunmap);
> +
> +inline bool xpfo_page_is_unmapped(struct page *page)
> +{
> +	if (!static_branch_unlikely(&xpfo_inited))
> +		return false;
> +
> +	return test_bit(XPFO_PAGE_UNMAPPED, &lookup_xpfo(page)->flags);
> +}
> +EXPORT_SYMBOL(xpfo_page_is_unmapped);

How can it be inline and 'EXPORT_SYMBOL'? And why make it inline? It
surely does not need to be accessed that often?

> -- 
> 2.11.0
>
Tycho Andersen Aug. 10, 2017, 4:22 p.m. UTC | #2
On Thu, Aug 10, 2017 at 09:01:06AM -0400, Konrad Rzeszutek Wilk wrote:
> On Wed, Aug 09, 2017 at 02:07:48PM -0600, Tycho Andersen wrote:
> > +inline bool xpfo_page_is_unmapped(struct page *page)
> > +{
> > +	if (!static_branch_unlikely(&xpfo_inited))
> > +		return false;
> > +
> > +	return test_bit(XPFO_PAGE_UNMAPPED, &lookup_xpfo(page)->flags);
> > +}
> > +EXPORT_SYMBOL(xpfo_page_is_unmapped);
> 
> How can it be inline and 'EXPORT_SYMBOL'? And why make it inline? It
> surely does not need to be accessed that often?

Good point. I'll drop inline from the next version, thanks!

Tycho
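[For reference, a minimal sketch of what dropping inline for the next
version would look like; the function body is exactly the one in the patch
above, only the qualifier changes so the out-of-line definition and
EXPORT_SYMBOL no longer clash:]

	/* mm/xpfo.c -- sketch of the de-inlined version agreed above */
	bool xpfo_page_is_unmapped(struct page *page)
	{
		if (!static_branch_unlikely(&xpfo_inited))
			return false;

		return test_bit(XPFO_PAGE_UNMAPPED, &lookup_xpfo(page)->flags);
	}
	EXPORT_SYMBOL(xpfo_page_is_unmapped);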
Dave Hansen Sept. 20, 2017, 4:19 p.m. UTC | #3
On 08/09/2017 01:07 PM, Tycho Andersen wrote:
> --- a/lib/swiotlb.c
> +++ b/lib/swiotlb.c
> @@ -420,8 +420,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
>  {
>  	unsigned long pfn = PFN_DOWN(orig_addr);
>  	unsigned char *vaddr = phys_to_virt(tlb_addr);
> +	struct page *page = pfn_to_page(pfn);
>  
> -	if (PageHighMem(pfn_to_page(pfn))) {
> +	if (PageHighMem(page) || xpfo_page_is_unmapped(page)) {
>  		/* The buffer does not have a mapping.  Map it in and copy */
>  		unsigned int offset = orig_addr & ~PAGE_MASK;
>  		char *buffer;

This is a little scary.  I wonder how many more of these are in the
kernel, like:

> static inline void *skcipher_map(struct scatter_walk *walk)
> {
>         struct page *page = scatterwalk_page(walk);
> 
>         return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
>                offset_in_page(walk->offset);
> }

Is there any better way to catch these?  Like, can we add some debugging
to check for XPFO pages in __va()?
Tycho Andersen Sept. 20, 2017, 10:47 p.m. UTC | #4
On Wed, Sep 20, 2017 at 09:19:56AM -0700, Dave Hansen wrote:
> On 08/09/2017 01:07 PM, Tycho Andersen wrote:
> > --- a/lib/swiotlb.c
> > +++ b/lib/swiotlb.c
> > @@ -420,8 +420,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
> >  {
> >  	unsigned long pfn = PFN_DOWN(orig_addr);
> >  	unsigned char *vaddr = phys_to_virt(tlb_addr);
> > +	struct page *page = pfn_to_page(pfn);
> >  
> > -	if (PageHighMem(pfn_to_page(pfn))) {
> > +	if (PageHighMem(page) || xpfo_page_is_unmapped(page)) {
> >  		/* The buffer does not have a mapping.  Map it in and copy */
> >  		unsigned int offset = orig_addr & ~PAGE_MASK;
> >  		char *buffer;
> 
> This is a little scary.  I wonder how many more of these are in the
> kernel, like:

I don't know, but I assume several :)

> > static inline void *skcipher_map(struct scatter_walk *walk)
> > {
> >         struct page *page = scatterwalk_page(walk);
> > 
> >         return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
> >                offset_in_page(walk->offset);
> > }
> 
> Is there any better way to catch these?  Like, can we add some debugging
> to check for XPFO pages in __va()?

Yes, and perhaps also a debugging check in PageHighMem? Would __va
have caught either of the two cases you've pointed out?

Tycho
Dave Hansen Sept. 20, 2017, 11:25 p.m. UTC | #5
On 09/20/2017 03:47 PM, Tycho Andersen wrote:
> 
>>> static inline void *skcipher_map(struct scatter_walk *walk)
>>> {
>>>         struct page *page = scatterwalk_page(walk);
>>>
>>>         return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
>>>                offset_in_page(walk->offset);
>>> }
>> Is there any better way to catch these?  Like, can we add some debugging
>> to check for XPFO pages in __va()?
> Yes, and perhaps also a debugging check in PageHighMem?

I'm not sure what PageHighMem() would check.  It's OK to use as long as
you don't depend on the contents of the page.
		
> Would __va have caught either of the two cases you've pointed out?
Yes.  __va() is what is eventually called by lowmem_page_address(),
which is only OK to call on things that are actually mapped into the kernel.

Patch

diff --git a/include/linux/xpfo.h b/include/linux/xpfo.h
index 1ff2d1976837..6b61f7b820f4 100644
--- a/include/linux/xpfo.h
+++ b/include/linux/xpfo.h
@@ -27,6 +27,8 @@ void xpfo_kunmap(void *kaddr, struct page *page);
 void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp);
 void xpfo_free_pages(struct page *page, int order);
 
+bool xpfo_page_is_unmapped(struct page *page);
+
 #else /* !CONFIG_XPFO */
 
 static inline void xpfo_kmap(void *kaddr, struct page *page) { }
@@ -34,6 +36,8 @@ static inline void xpfo_kunmap(void *kaddr, struct page *page) { }
 static inline void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp) { }
 static inline void xpfo_free_pages(struct page *page, int order) { }
 
+static inline bool xpfo_page_is_unmapped(struct page *page) { return false; }
+
 #endif /* CONFIG_XPFO */
 
 #endif /* _LINUX_XPFO_H */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a8d74a733a38..d4fee5ca2d9e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -420,8 +420,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 {
 	unsigned long pfn = PFN_DOWN(orig_addr);
 	unsigned char *vaddr = phys_to_virt(tlb_addr);
+	struct page *page = pfn_to_page(pfn);
 
-	if (PageHighMem(pfn_to_page(pfn))) {
+	if (PageHighMem(page) || xpfo_page_is_unmapped(page)) {
 		/* The buffer does not have a mapping.  Map it in and copy */
 		unsigned int offset = orig_addr & ~PAGE_MASK;
 		char *buffer;
diff --git a/mm/xpfo.c b/mm/xpfo.c
index 3cd45f68b5ad..3f305f31a072 100644
--- a/mm/xpfo.c
+++ b/mm/xpfo.c
@@ -206,3 +206,12 @@ void xpfo_kunmap(void *kaddr, struct page *page)
 	spin_unlock_irqrestore(&xpfo->maplock, flags);
 }
 EXPORT_SYMBOL(xpfo_kunmap);
+
+inline bool xpfo_page_is_unmapped(struct page *page)
+{
+	if (!static_branch_unlikely(&xpfo_inited))
+		return false;
+
+	return test_bit(XPFO_PAGE_UNMAPPED, &lookup_xpfo(page)->flags);
+}
+EXPORT_SYMBOL(xpfo_page_is_unmapped);
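
[For context on why routing XPFO-unmapped pages into the PageHighMem()
branch is sufficient: with XPFO enabled, kmap_atomic() re-establishes a
temporary kernel mapping for the page (via xpfo_kmap(), as the earlier
patches in this series arrange), so the copy never touches the missing
linear-map address. Below is a simplified paraphrase of the existing
map-and-copy loop in swiotlb_bounce() that this branch falls into; it is
not the exact upstream code, and buffer, flags, dir, size, vaddr, pfn and
offset are locals/arguments of the enclosing function:]

	/* Paraphrased from the highmem branch of swiotlb_bounce():
	 * copy the bounce buffer page by page through a temporary
	 * kmap_atomic() mapping instead of the linear map.
	 */
	while (size) {
		size_t sz = min_t(size_t, PAGE_SIZE - offset, size);

		local_irq_save(flags);
		buffer = kmap_atomic(pfn_to_page(pfn));
		if (dir == DMA_TO_DEVICE)
			memcpy(vaddr, buffer + offset, sz);	/* bounce out */
		else
			memcpy(buffer + offset, vaddr, sz);	/* bounce in */
		kunmap_atomic(buffer);
		local_irq_restore(flags);

		size -= sz;
		pfn++;
		vaddr += sz;
		offset = 0;
	}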