
[RFC,03/12] s390/mm: force swiotlb for protected virtualization

Message ID 20190404231622.52531-4-pasic@linux.ibm.com (mailing list archive)
State New, archived
Series s390: virtio: support protected virtualization

Commit Message

Halil Pasic April 4, 2019, 11:16 p.m. UTC
On s390 protected virtualization guests also have to use bounce I/O
buffers.  That requires some plumbing.

Let us make sure any device using DMA API accordingly is spared from the
problems that hypervisor attempting I/O to a non-shared secure page would
bring.

Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
---
 arch/s390/Kconfig                   |  4 ++++
 arch/s390/include/asm/Kbuild        |  1 -
 arch/s390/include/asm/dma-mapping.h | 13 +++++++++++
 arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++++
 arch/s390/mm/init.c                 | 44 +++++++++++++++++++++++++++++++++++++
 5 files changed, 79 insertions(+), 1 deletion(-)
 create mode 100644 arch/s390/include/asm/dma-mapping.h
 create mode 100644 arch/s390/include/asm/mem_encrypt.h
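For context: with swiotlb_force set to SWIOTLB_FORCE, the DMA API never hands
a device an address inside protected guest memory; streaming mappings are
bounced through a pool that pv_init() (below) shares with the hypervisor at
boot. A minimal sketch of the effect (illustrative only, not part of the
patch; pv_map_sketch is a made-up helper):

	/* Sketch: a streaming mapping under forced swiotlb. */
	static dma_addr_t pv_map_sketch(struct device *dev, void *buf,
					size_t size,
					enum dma_data_direction dir)
	{
		/*
		 * Because swiotlb_force == SWIOTLB_FORCE, dma_map_single()
		 * copies 'buf' into a slot of the (already shared) swiotlb
		 * pool and returns the DMA address of that slot.
		 */
		return dma_map_single(dev, buf, size, dir);
	}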

Comments

Cornelia Huck April 9, 2019, 10:16 a.m. UTC | #1
On Fri,  5 Apr 2019 01:16:13 +0200
Halil Pasic <pasic@linux.ibm.com> wrote:

> On s390 protected virtualization guests also have to use bounce I/O
> buffers.  That requires some plumbing.
> 
> Let us make sure any device using DMA API accordingly is spared from the
> problems that hypervisor attempting I/O to a non-shared secure page would
> bring.

I have problems parsing this sentence :(

Do you mean that we want to exclude pages for I/O from encryption?

> 
> Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
> ---
>  arch/s390/Kconfig                   |  4 ++++
>  arch/s390/include/asm/Kbuild        |  1 -
>  arch/s390/include/asm/dma-mapping.h | 13 +++++++++++
>  arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++++
>  arch/s390/mm/init.c                 | 44 +++++++++++++++++++++++++++++++++++++
>  5 files changed, 79 insertions(+), 1 deletion(-)
>  create mode 100644 arch/s390/include/asm/dma-mapping.h
>  create mode 100644 arch/s390/include/asm/mem_encrypt.h

(...)

> @@ -126,6 +129,45 @@ void mark_rodata_ro(void)
>  	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
>  }
>  
> +int set_memory_encrypted(unsigned long addr, int numpages)
> +{
> +	/* also called for the swiotlb bounce buffers, make all pages shared */
> +	/* TODO: do ultravisor calls */
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> +
> +int set_memory_decrypted(unsigned long addr, int numpages)
> +{
> +	/* also called for the swiotlb bounce buffers, make all pages shared */
> +	/* TODO: do ultravisor calls */
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> +
> +/* are we a protected virtualization guest? */
> +bool sev_active(void)
> +{
> +	/*
> +	 * TODO: Do proper detection using ultravisor, for now let us fake we
> +	 *  have it so the code gets exercised.

That's the swiotlb stuff, right?

(The patches will obviously need some reordering before this actually
gets merged.)

> +	 */
> +	return true;
> +}
> +EXPORT_SYMBOL_GPL(sev_active);
> +
> +/* protected virtualization */
> +static void pv_init(void)
> +{
> +	if (!sev_active())
> +		return;
> +
> +	/* make sure bounce buffers are shared */
> +	swiotlb_init(1);
> +	swiotlb_update_mem_attributes();
> +	swiotlb_force = SWIOTLB_FORCE;
> +}
> +
>  void __init mem_init(void)
>  {
>  	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +176,8 @@ void __init mem_init(void)
>  	set_max_mapnr(max_low_pfn);
>          high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>  
> +	pv_init();
> +
>  	/* Setup guest page hinting */
>  	cmma_init();
>
Halil Pasic April 9, 2019, 10:54 a.m. UTC | #2
On Tue, 9 Apr 2019 12:16:47 +0200
Cornelia Huck <cohuck@redhat.com> wrote:

> On Fri,  5 Apr 2019 01:16:13 +0200
> Halil Pasic <pasic@linux.ibm.com> wrote:
> 
> > On s390 protected virtualization guests also have to use bounce I/O
> > buffers.  That requires some plumbing.
> > 
> > Let us make sure any device using DMA API accordingly is spared from the
                    ^,                                   ^,
Maybe this helps...

> > problems that hypervisor attempting I/O to a non-shared secure page would
> > bring.
> 
> I have problems parsing this sentence :(
> 
> Do you mean that we want to exclude pages for I/O from encryption?

The intended meaning is:
* Devices that do use the DMA API (properly) to get/map the memory
  that is used to talk to the hypervisor should be OK with PV (protected
  virtualization). I.e. for such devices PV or not PV is basically
  transparent.
* But if a device does not use the DMA API for the memory that is used
  to talk to the hypervisor, this patch won't help.

And yes, the gist of it is: memory accessed by the hypervisor needs to
be on pages excluded from protection (which in case of PV is technically
not encryption).

Does that help?
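To illustrate with a hypothetical driver snippet (a sketch only;
tell_hypervisor is a made-up stand-in for any paravirtual interface):

	/* OK under PV: the hypervisor only ever sees the DMA address,
	 * which swiotlb backs with a shared bounce page. */
	dma_addr_t dma = dma_map_single(dev, req, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	tell_hypervisor(dma);				/* hypothetical */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);

	/* Not OK under PV: handing over a raw guest physical address
	 * bypasses the bounce buffering, so the hypervisor would fault
	 * on the protected page. */
	tell_hypervisor(virt_to_phys(req));		/* hypothetical */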

> 
> > 
> > Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
> > ---
> >  arch/s390/Kconfig                   |  4 ++++
> >  arch/s390/include/asm/Kbuild        |  1 -
> >  arch/s390/include/asm/dma-mapping.h | 13 +++++++++++
> >  arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++++
> >  arch/s390/mm/init.c                 | 44 +++++++++++++++++++++++++++++++++++++
> >  5 files changed, 79 insertions(+), 1 deletion(-)
> >  create mode 100644 arch/s390/include/asm/dma-mapping.h
> >  create mode 100644 arch/s390/include/asm/mem_encrypt.h
> 
> (...)
> 
> > @@ -126,6 +129,45 @@ void mark_rodata_ro(void)
> >  	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
> >  }
> >  
> > +int set_memory_encrypted(unsigned long addr, int numpages)
> > +{
> > +	/* also called for the swiotlb bounce buffers, make all pages shared */
> > +	/* TODO: do ultravisor calls */
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> > +
> > +int set_memory_decrypted(unsigned long addr, int numpages)
> > +{
> > +	/* also called for the swiotlb bounce buffers, make all pages shared */
> > +	/* TODO: do ultravisor calls */
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> > +
> > +/* are we a protected virtualization guest? */
> > +bool sev_active(void)
> > +{
> > +	/*
> > +	 * TODO: Do proper detection using ultravisor, for now let us fake we
> > +	 *  have it so the code gets exercised.
> 
> That's the swiotlb stuff, right?
> 

You mean 'That' == code to get exercised == 'swiotlb stuff'?

If yes, then the answer is: kind of. The swiotlb (i.e. bounce buffers)
covers what we map (like the buffers pointed to by the descriptors in
case of the virtio ring). The other part of it is the memory allocated
as DMA coherent (i.e. the virtio ring (desc, avail, used) itself).
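In code terms, the two parts are roughly (a sketch that glosses over the
core-kernel plumbing of this era):

	/* Part 1: streaming DMA -- bounced through the shared swiotlb
	 * pool on each map, no page-state change per mapping. */
	desc_dma = dma_map_single(dev, desc_buf, len, DMA_BIDIRECTIONAL);

	/* Part 2: coherent DMA -- long-lived memory such as the virtio
	 * ring is allocated once, and the common DMA code is expected
	 * to share those pages via set_memory_decrypted(). */
	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);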

> (The patches will obviously need some reordering before this actually
> gets merged.)
> 

What do you mean by reordering?

One reason why this is an early RFC is the missing dependency (i.e. the
stuff described by most of the TODO comments), as pointed out in the
cover letter. Another reason is that I wanted to avoid putting a lot of
effort into fine-polishing before getting some feedback on the basics
from the community. ;)


> > +	 */
> > +	return true;
> > +}
> > +EXPORT_SYMBOL_GPL(sev_active);
> > +
> > +/* protected virtualization */
> > +static void pv_init(void)
> > +{
> > +	if (!sev_active())
> > +		return;
> > +
> > +	/* make sure bounce buffers are shared */
> > +	swiotlb_init(1);
> > +	swiotlb_update_mem_attributes();
> > +	swiotlb_force = SWIOTLB_FORCE;
> > +}
> > +
> >  void __init mem_init(void)
> >  {
> >  	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> > @@ -134,6 +176,8 @@ void __init mem_init(void)
> >  	set_max_mapnr(max_low_pfn);
> >          high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> >  
> > +	pv_init();
> > +
> >  	/* Setup guest page hinting */
> >  	cmma_init();
> >  
>
Christoph Hellwig April 9, 2019, 12:22 p.m. UTC | #3
> +++ b/arch/s390/include/asm/dma-mapping.h
> @@ -0,0 +1,13 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_S390_DMA_MAPPING_H
> +#define _ASM_S390_DMA_MAPPING_H
> +
> +#include <linux/dma-contiguous.h>
> +
> +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
> +{
> +	return NULL;
> +}
> +
> +#endif /* _ASM_S390_DMA_MAPPING_H */
> +

Congratulations!  You just created an entirely pointless duplicate of
include/asm-generic/dma-mapping.h.
Halil Pasic April 9, 2019, 12:39 p.m. UTC | #4
On Tue, 9 Apr 2019 05:22:41 -0700
Christoph Hellwig <hch@infradead.org> wrote:

> > +++ b/arch/s390/include/asm/dma-mapping.h
> > @@ -0,0 +1,13 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef _ASM_S390_DMA_MAPPING_H
> > +#define _ASM_S390_DMA_MAPPING_H
> > +
> > +#include <linux/dma-contiguous.h>
> > +
> > +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
> > +{
> > +	return NULL;
> > +}
> > +
> > +#endif /* _ASM_S390_DMA_MAPPING_H */
> > +
> 
> Congratulations!  You just created an entirely pointless duplicate of
> include/asm-generic/dma-mapping.h.
> 

Right! Thanks for spotting. There was a stage in development when my
arch/s390/include/asm/dma-mapping.h looked quite different, but I did
end up with an entirely pointless duplicate indeed.

I will fix it for v1.

Regards,
Halil
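For reference, the generic header in question is essentially the following
(include/asm-generic/dma-mapping.h of that era, reproduced from memory, so
treat it as approximate):

	/* SPDX-License-Identifier: GPL-2.0 */
	#ifndef _ASM_GENERIC_DMA_MAPPING_H
	#define _ASM_GENERIC_DMA_MAPPING_H

	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
	{
		return NULL;
	}

	#endif /* _ASM_GENERIC_DMA_MAPPING_H */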
Cornelia Huck April 9, 2019, 5:18 p.m. UTC | #5
On Tue, 9 Apr 2019 12:54:16 +0200
Halil Pasic <pasic@linux.ibm.com> wrote:

> On Tue, 9 Apr 2019 12:16:47 +0200
> Cornelia Huck <cohuck@redhat.com> wrote:
> 
> > On Fri,  5 Apr 2019 01:16:13 +0200
> > Halil Pasic <pasic@linux.ibm.com> wrote:
> >   
> > > On s390 protected virtualization guests also have to use bounce I/O
> > > buffers.  That requires some plumbing.
> > > 
> > > Let us make sure any device using DMA API accordingly is spared from the  
>                     ^,                                   ^,
> Maybe this helps...
> 
> > > problems that hypervisor attempting I/O to a non-shared secure page would
> > > bring.  
> > 
> > I have problems parsing this sentence :(
> > 
> > Do you mean that we want to exclude pages for I/O from encryption?  
> 
> The intended meaning is:
> * Devices that do use the DMA API (properly) to get/map the memory
>   that is used to talk to the hypervisor should be OK with PV (protected
>   virtualization). I.e. for such devices PV or not PV is basically
>   transparent.
> * But if a device does not use the DMA API for the memory that is used
>   to talk to the hypervisor, this patch won't help.
> 
> And yes, the gist of it is: memory accessed by the hypervisor needs to
> be on pages excluded from protection (which in case of PV is technically
> not encryption).
> 
> Does that help?

Hm, let me sleep on this. The original sentence was a bit too
convoluted for me...

> 
> >   
> > > 
> > > Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
> > > ---
> > >  arch/s390/Kconfig                   |  4 ++++
> > >  arch/s390/include/asm/Kbuild        |  1 -
> > >  arch/s390/include/asm/dma-mapping.h | 13 +++++++++++
> > >  arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++++
> > >  arch/s390/mm/init.c                 | 44 +++++++++++++++++++++++++++++++++++++
> > >  5 files changed, 79 insertions(+), 1 deletion(-)
> > >  create mode 100644 arch/s390/include/asm/dma-mapping.h
> > >  create mode 100644 arch/s390/include/asm/mem_encrypt.h  
> > 
> > (...)
> >   
> > > @@ -126,6 +129,45 @@ void mark_rodata_ro(void)
> > >  	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
> > >  }
> > >  
> > > +int set_memory_encrypted(unsigned long addr, int numpages)
> > > +{
> > > +	/* also called for the swiotlb bounce buffers, make all pages shared */
> > > +	/* TODO: do ultravisor calls */
> > > +	return 0;
> > > +}
> > > +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> > > +
> > > +int set_memory_decrypted(unsigned long addr, int numpages)
> > > +{
> > > +	/* also called for the swiotlb bounce buffers, make all pages shared */
> > > +	/* TODO: do ultravisor calls */
> > > +	return 0;
> > > +}
> > > +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> > > +
> > > +/* are we a protected virtualization guest? */
> > > +bool sev_active(void)
> > > +{
> > > +	/*
> > > +	 * TODO: Do proper detection using ultravisor, for now let us fake we
> > > +	 *  have it so the code gets exercised.  
> > 
> > That's the swiotlb stuff, right?
> >   
> 
> You mean 'That' == code to get exercised == 'swiotlb stuff'?
> 
> If yes, then the answer is: kind of. The swiotlb (i.e. bounce buffers)
> covers what we map (like the buffers pointed to by the descriptors in
> case of the virtio ring). The other part of it is the memory allocated
> as DMA coherent (i.e. the virtio ring (desc, avail, used) itself).

Ok.

> 
> > (The patches will obviously need some reordering before this actually
> > gets merged.)
> >   
> 
> What do you mean by reordering?
> 
> One reason why this is an early RFC is the missing dependency (i.e. the
> stuff described by most of the TODO comments), as pointed out in the
> cover letter. Another reason is that I wanted to avoid putting a lot of
> effort into fine-polishing before getting some feedback on the basics
> from the community. ;)

Sure. I'm just reading top-down, and unconditionally enabling this is
something that obviously needs to be changed in later iterations ;)

> 
> 
> > > +	 */
> > > +	return true;
> > > +}
> > > +EXPORT_SYMBOL_GPL(sev_active);
> > > +
> > > +/* protected virtualization */
> > > +static void pv_init(void)
> > > +{
> > > +	if (!sev_active())
> > > +		return;
> > > +
> > > +	/* make sure bounce buffers are shared */
> > > +	swiotlb_init(1);
> > > +	swiotlb_update_mem_attributes();
> > > +	swiotlb_force = SWIOTLB_FORCE;
> > > +}
> > > +
> > >  void __init mem_init(void)
> > >  {
> > >  	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> > > @@ -134,6 +176,8 @@ void __init mem_init(void)
> > >  	set_max_mapnr(max_low_pfn);
> > >          high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> > >  
> > > +	pv_init();
> > > +
> > >  	/* Setup guest page hinting */
> > >  	cmma_init();
> > >    
> >   
>

Patch

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b6e3d0653002..46c69283a67b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,4 +1,7 @@ 
 # SPDX-License-Identifier: GPL-2.0
+config ARCH_HAS_MEM_ENCRYPT
+        def_bool y
+
 config MMU
 	def_bool y
 
@@ -190,6 +193,7 @@  config S390
 	select ARCH_HAS_SCALED_CPUTIME
 	select VIRT_TO_BUS
 	select HAVE_NMI
+	select SWIOTLB
 
 
 config SCHED_OMIT_FRAME_POINTER
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 12d77cb11fe5..ba55cd472950 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -8,7 +8,6 @@  generic-y += asm-offsets.h
 generic-y += cacheflush.h
 generic-y += device.h
 generic-y += dma-contiguous.h
-generic-y += dma-mapping.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += export.h
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..8985da6ecdfd
--- /dev/null
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -0,0 +1,13 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_DMA_MAPPING_H
+#define _ASM_S390_DMA_MAPPING_H
+
+#include <linux/dma-contiguous.h>
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+	return NULL;
+}
+
+#endif /* _ASM_S390_DMA_MAPPING_H */
+
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
new file mode 100644
index 000000000000..0898c09a888c
--- /dev/null
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask	0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3e82f66d5c61..a47bd4998d24 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@ 
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/swiotlb.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
@@ -29,6 +30,7 @@ 
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
+#include <linux/dma-mapping.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -42,6 +44,7 @@ 
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
 #include <asm/kasan.h>
+#include <asm/dma-mapping.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -126,6 +129,45 @@  void mark_rodata_ro(void)
 	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }
 
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+	/* also called for the swiotlb bounce buffers, make all pages shared */
+	/* TODO: do ultravisor calls */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_encrypted);
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+	/* also called for the swiotlb bounce buffers, make all pages shared */
+	/* TODO: do ultravisor calls */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_decrypted);
+
+/* are we a protected virtualization guest? */
+bool sev_active(void)
+{
+	/*
+	 * TODO: Do proper detection using ultravisor, for now let us fake we
+	 *  have it so the code gets exercised.
+	 */
+	return true;
+}
+EXPORT_SYMBOL_GPL(sev_active);
+
+/* protected virtualization */
+static void pv_init(void)
+{
+	if (!sev_active())
+		return;
+
+	/* make sure bounce buffers are shared */
+	swiotlb_init(1);
+	swiotlb_update_mem_attributes();
+	swiotlb_force = SWIOTLB_FORCE;
+}
+
 void __init mem_init(void)
 {
 	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -134,6 +176,8 @@  void __init mem_init(void)
 	set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
+	pv_init();
+
 	/* Setup guest page hinting */
 	cmma_init();
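
A closing note on pv_init() above, read step by step (assuming the swiotlb
API of this kernel version):

	swiotlb_init(1);		 /* allocate the default bounce pool;
					  * the argument is the verbose flag */
	swiotlb_update_mem_attributes(); /* set_memory_decrypted() on the
					  * whole pool, i.e. share it */
	swiotlb_force = SWIOTLB_FORCE;	 /* from now on, bounce every
					  * streaming DMA mapping through it */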