[04/10] s390/mm: force swiotlb for protected virtualization
diff mbox series

Message ID 20190426183245.37939-5-pasic@linux.ibm.com
State New
Headers show
Series
  • s390: virtio: support protected virtualization
Related show

Commit Message

Halil Pasic April 26, 2019, 6:32 p.m. UTC
On s390, protected virtualization guests have to use bounced I/O
buffers.  That requires some plumbing.

Let us make sure that any device that uses the DMA API with direct ops
is correctly spared from the problems that a hypervisor attempting I/O
to a non-shared page would bring.

Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
---
 arch/s390/Kconfig                   |  4 +++
 arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++
 arch/s390/mm/init.c                 | 50 +++++++++++++++++++++++++++++++++++++
 3 files changed, 72 insertions(+)
 create mode 100644 arch/s390/include/asm/mem_encrypt.h

Comments

Christoph Hellwig April 26, 2019, 7:27 p.m. UTC | #1
On Fri, Apr 26, 2019 at 08:32:39PM +0200, Halil Pasic wrote:
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);

> +EXPORT_SYMBOL_GPL(set_memory_decrypted);

> +EXPORT_SYMBOL_GPL(sev_active);

Why do you export these?  I know x86 exports those as well, but
it shouldn't be needed there either.
Halil Pasic April 29, 2019, 1:59 p.m. UTC | #2
On Fri, 26 Apr 2019 12:27:11 -0700
Christoph Hellwig <hch@infradead.org> wrote:

> On Fri, Apr 26, 2019 at 08:32:39PM +0200, Halil Pasic wrote:
> > +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> 
> > +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> 
> > +EXPORT_SYMBOL_GPL(sev_active);
> 
> Why do you export these?  I know x86 exports those as well, but
> it shoudn't be needed there either.
> 

I export these to be in line with the x86 implementation (which
is the original and seems to be the only one at the moment). I assumed
that 'exported or not' is kind of a part of the interface definition.
Honestly, I did not give it too much thought.

For x86 set_memory(en|de)crypted got exported by 95cf9264d5f3 "x86, drm,
fbdev: Do not specify encrypted memory for video mappings" (Tom
Lendacky, 2017-07-17). With CONFIG_FB_VGA16=m it seems to be necessary for x86.

If the consensus is don't export: I won't. I'm fine one way or the other.
@Christian, what is your take on this?

Thank you very much!

Regards,
Halil
Christian Borntraeger April 29, 2019, 2:05 p.m. UTC | #3
On 29.04.19 15:59, Halil Pasic wrote:
> On Fri, 26 Apr 2019 12:27:11 -0700
> Christoph Hellwig <hch@infradead.org> wrote:
> 
>> On Fri, Apr 26, 2019 at 08:32:39PM +0200, Halil Pasic wrote:
>>> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
>>
>>> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
>>
>>> +EXPORT_SYMBOL_GPL(sev_active);
>>
>> Why do you export these?  I know x86 exports those as well, but
>> it shoudn't be needed there either.
>>
> 
> I export these to be in line with the x86 implementation (which
> is the original and seems to be the only one at the moment). I assumed
> that 'exported or not' is kind of a part of the interface definition.
> Honestly, I did not give it too much thought.
> 
> For x86 set_memory(en|de)crypted got exported by 95cf9264d5f3 "x86, drm,
> fbdev: Do not specify encrypted memory for video mappings" (Tom
> Lendacky, 2017-07-17). With CONFIG_FB_VGA16=m seems to be necessary for x84.
> 
> If the consensus is don't export: I won't. I'm fine one way or the other.
> @Christian, what is your take on this?

If we do not need it today for anything (e.g. virtio-gpu) then we can get rid
of the exports (and introduce them when necessary).
> 
> Thank you very much!
> 
> Regards,
> Halil
> 
>
Claudio Imbrenda May 8, 2019, 1:15 p.m. UTC | #4
On Fri, 26 Apr 2019 20:32:39 +0200
Halil Pasic <pasic@linux.ibm.com> wrote:

> On s390, protected virtualization guests have to use bounced I/O
> buffers.  That requires some plumbing.
> 
> Let us make sure, any device that uses DMA API with direct ops
> correctly is spared from the problems, that a hypervisor attempting
> I/O to a non-shared page would bring.
> 
> Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
> ---
>  arch/s390/Kconfig                   |  4 +++
>  arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++
>  arch/s390/mm/init.c                 | 50
> +++++++++++++++++++++++++++++++++++++ 3 files changed, 72
> insertions(+) create mode 100644 arch/s390/include/asm/mem_encrypt.h
> 
> diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
> index 1c3fcf19c3af..5500d05d4d53 100644
> --- a/arch/s390/Kconfig
> +++ b/arch/s390/Kconfig
> @@ -1,4 +1,7 @@
>  # SPDX-License-Identifier: GPL-2.0
> +config ARCH_HAS_MEM_ENCRYPT
> +        def_bool y
> +
>  config MMU
>  	def_bool y
>  
> @@ -191,6 +194,7 @@ config S390
>  	select ARCH_HAS_SCALED_CPUTIME
>  	select VIRT_TO_BUS
>  	select HAVE_NMI
> +	select SWIOTLB
>  
>  
>  config SCHED_OMIT_FRAME_POINTER
> diff --git a/arch/s390/include/asm/mem_encrypt.h
> b/arch/s390/include/asm/mem_encrypt.h new file mode 100644
> index 000000000000..0898c09a888c
> --- /dev/null
> +++ b/arch/s390/include/asm/mem_encrypt.h
> @@ -0,0 +1,18 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef S390_MEM_ENCRYPT_H__
> +#define S390_MEM_ENCRYPT_H__
> +
> +#ifndef __ASSEMBLY__
> +
> +#define sme_me_mask	0ULL

This is rather ugly, but I understand why it's there

> +
> +static inline bool sme_active(void) { return false; }
> +extern bool sev_active(void);
> +
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> +
> +#endif	/* __ASSEMBLY__ */
> +
> +#endif	/* S390_MEM_ENCRYPT_H__ */
> +
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 3e82f66d5c61..7e3cbd15dcfa 100644
> --- a/arch/s390/mm/init.c
> +++ b/arch/s390/mm/init.c
> @@ -18,6 +18,7 @@
>  #include <linux/mman.h>
>  #include <linux/mm.h>
>  #include <linux/swap.h>
> +#include <linux/swiotlb.h>
>  #include <linux/smp.h>
>  #include <linux/init.h>
>  #include <linux/pagemap.h>
> @@ -29,6 +30,7 @@
>  #include <linux/export.h>
>  #include <linux/cma.h>
>  #include <linux/gfp.h>
> +#include <linux/dma-mapping.h>
>  #include <asm/processor.h>
>  #include <linux/uaccess.h>
>  #include <asm/pgtable.h>
> @@ -42,6 +44,8 @@
>  #include <asm/sclp.h>
>  #include <asm/set_memory.h>
>  #include <asm/kasan.h>
> +#include <asm/dma-mapping.h>
> +#include <asm/uv.h>
>  
>  pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
>  
> @@ -126,6 +130,50 @@ void mark_rodata_ro(void)
>  	pr_info("Write protected read-only-after-init data: %luk\n",
> size >> 10); }
>  
> +int set_memory_encrypted(unsigned long addr, int numpages)
> +{
> +	int i;
> +
> +	/* make all pages shared, (swiotlb, dma_free) */

this is a copypaste typo, I think? (should be UNshared?)
also, it doesn't make ALL pages unshared, but only those specified in
the parameters

with this fixed:
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>

> +	for (i = 0; i < numpages; ++i) {
> +		uv_remove_shared(addr);
> +		addr += PAGE_SIZE;
> +	}
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> +
> +int set_memory_decrypted(unsigned long addr, int numpages)
> +{
> +	int i;
> +	/* make all pages shared (swiotlb, dma_alloca) */

same here with ALL

> +	for (i = 0; i < numpages; ++i) {
> +		uv_set_shared(addr);
> +		addr += PAGE_SIZE;
> +	}
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> +
> +/* are we a protected virtualization guest? */
> +bool sev_active(void)

this is also ugly. the correct solution would be probably to refactor
everything, including all the AMD SEV code.... let's not go there

> +{
> +	return is_prot_virt_guest();
> +}
> +EXPORT_SYMBOL_GPL(sev_active);
> +
> +/* protected virtualization */
> +static void pv_init(void)
> +{
> +	if (!sev_active())

can't you just use is_prot_virt_guest here?

> +		return;
> +
> +	/* make sure bounce buffers are shared */
> +	swiotlb_init(1);
> +	swiotlb_update_mem_attributes();
> +	swiotlb_force = SWIOTLB_FORCE;
> +}
> +
>  void __init mem_init(void)
>  {
>  	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +182,8 @@ void __init mem_init(void)
>  	set_max_mapnr(max_low_pfn);
>          high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>  
> +	pv_init();
> +
>  	/* Setup guest page hinting */
>  	cmma_init();
>
Jason J. Herne May 9, 2019, 6:05 p.m. UTC | #5
> Subject: [PATCH 04/10] s390/mm: force swiotlb for protected virtualization
> Date: Fri, 26 Apr 2019 20:32:39 +0200
> From: Halil Pasic <pasic@linux.ibm.com>
> To: kvm@vger.kernel.org, linux-s390@vger.kernel.org, Cornelia Huck <cohuck@redhat.com>, 
> Martin Schwidefsky <schwidefsky@de.ibm.com>, Sebastian Ott <sebott@linux.ibm.com>
> CC: Halil Pasic <pasic@linux.ibm.com>, virtualization@lists.linux-foundation.org, Michael 
> S. Tsirkin <mst@redhat.com>, Christoph Hellwig <hch@infradead.org>, Thomas Huth 
> <thuth@redhat.com>, Christian Borntraeger <borntraeger@de.ibm.com>, Viktor Mihajlovski 
> <mihajlov@linux.ibm.com>, Vasily Gorbik <gor@linux.ibm.com>, Janosch Frank 
> <frankja@linux.ibm.com>, Claudio Imbrenda <imbrenda@linux.ibm.com>, Farhan Ali 
> <alifm@linux.ibm.com>, Eric Farman <farman@linux.ibm.com>
> 
> On s390, protected virtualization guests have to use bounced I/O
> buffers.  That requires some plumbing.
> 
> Let us make sure, any device that uses DMA API with direct ops correctly
> is spared from the problems, that a hypervisor attempting I/O to a
> non-shared page would bring.
> 
> Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
> ---
>   arch/s390/Kconfig                   |  4 +++
>   arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++
>   arch/s390/mm/init.c                 | 50 +++++++++++++++++++++++++++++++++++++
>   3 files changed, 72 insertions(+)
>   create mode 100644 arch/s390/include/asm/mem_encrypt.h
> 
> diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
> index 1c3fcf19c3af..5500d05d4d53 100644
> --- a/arch/s390/Kconfig
> +++ b/arch/s390/Kconfig
> @@ -1,4 +1,7 @@
>   # SPDX-License-Identifier: GPL-2.0
> +config ARCH_HAS_MEM_ENCRYPT
> +        def_bool y
> +
>   config MMU
>       def_bool y
>   @@ -191,6 +194,7 @@ config S390
>       select ARCH_HAS_SCALED_CPUTIME
>       select VIRT_TO_BUS
>       select HAVE_NMI
> +    select SWIOTLB
>     config SCHED_OMIT_FRAME_POINTER
> diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
> new file mode 100644
> index 000000000000..0898c09a888c
> --- /dev/null
> +++ b/arch/s390/include/asm/mem_encrypt.h
> @@ -0,0 +1,18 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef S390_MEM_ENCRYPT_H__
> +#define S390_MEM_ENCRYPT_H__
> +
> +#ifndef __ASSEMBLY__
> +
> +#define sme_me_mask    0ULL
> +
> +static inline bool sme_active(void) { return false; }
> +extern bool sev_active(void);
> +

I noticed this patch always returns false for sme_active. Is it safe to assume that 
whatever fixups are required on x86 to deal with sme do not apply to s390?

> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> +
> +#endif    /* __ASSEMBLY__ */
> +
> +#endif    /* S390_MEM_ENCRYPT_H__ */
> +
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 3e82f66d5c61..7e3cbd15dcfa 100644
> --- a/arch/s390/mm/init.c
> +++ b/arch/s390/mm/init.c
> @@ -18,6 +18,7 @@
>   #include <linux/mman.h>
>   #include <linux/mm.h>
>   #include <linux/swap.h>
> +#include <linux/swiotlb.h>
>   #include <linux/smp.h>
>   #include <linux/init.h>
>   #include <linux/pagemap.h>
> @@ -29,6 +30,7 @@
>   #include <linux/export.h>
>   #include <linux/cma.h>
>   #include <linux/gfp.h>
> +#include <linux/dma-mapping.h>
>   #include <asm/processor.h>
>   #include <linux/uaccess.h>
>   #include <asm/pgtable.h>
> @@ -42,6 +44,8 @@
>   #include <asm/sclp.h>
>   #include <asm/set_memory.h>
>   #include <asm/kasan.h>
> +#include <asm/dma-mapping.h>
> +#include <asm/uv.h>
>    pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
>   @@ -126,6 +130,50 @@ void mark_rodata_ro(void)
>       pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
>   }
>   +int set_memory_encrypted(unsigned long addr, int numpages)
> +{
> +    int i;
> +
> +    /* make all pages shared, (swiotlb, dma_free) */

This comment should be "make all pages unshared"?

> +    for (i = 0; i < numpages; ++i) {
> +        uv_remove_shared(addr);
> +        addr += PAGE_SIZE;
> +    }
> +    return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> +
> +int set_memory_decrypted(unsigned long addr, int numpages)
> +{
> +    int i;
> +    /* make all pages shared (swiotlb, dma_alloca) */
> +    for (i = 0; i < numpages; ++i) {
> +        uv_set_shared(addr);
> +        addr += PAGE_SIZE;
> +    }
> +    return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_decrypted);

The addr arguments for the above functions appear to be referring to virtual addresses. 
Would vaddr be a better name?

> +
> +/* are we a protected virtualization guest? */
> +bool sev_active(void)
> +{
> +    return is_prot_virt_guest();
> +}
> +EXPORT_SYMBOL_GPL(sev_active);
> +
> +/* protected virtualization */
> +static void pv_init(void)
> +{
> +    if (!sev_active())
> +        return;
> +
> +    /* make sure bounce buffers are shared */
> +    swiotlb_init(1);
> +    swiotlb_update_mem_attributes();
> +    swiotlb_force = SWIOTLB_FORCE;
> +}
> +
>   void __init mem_init(void)
>   {
>       cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +182,8 @@ void __init mem_init(void)
>       set_max_mapnr(max_low_pfn);
>           high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>   +    pv_init();
> +
>       /* Setup guest page hinting */
>       cmma_init();
>   -- 2.16.4
> 
>
Halil Pasic May 9, 2019, 10:34 p.m. UTC | #6
On Wed, 8 May 2019 15:15:40 +0200
Claudio Imbrenda <imbrenda@linux.ibm.com> wrote:

> On Fri, 26 Apr 2019 20:32:39 +0200
> Halil Pasic <pasic@linux.ibm.com> wrote:
> 
> > On s390, protected virtualization guests have to use bounced I/O
> > buffers.  That requires some plumbing.
> > 
> > Let us make sure, any device that uses DMA API with direct ops
> > correctly is spared from the problems, that a hypervisor attempting
> > I/O to a non-shared page would bring.
> > 
> > Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
> > ---
> >  arch/s390/Kconfig                   |  4 +++
> >  arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++
> >  arch/s390/mm/init.c                 | 50
> > +++++++++++++++++++++++++++++++++++++ 3 files changed, 72
> > insertions(+) create mode 100644 arch/s390/include/asm/mem_encrypt.h
> > 
> > diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
> > index 1c3fcf19c3af..5500d05d4d53 100644
> > --- a/arch/s390/Kconfig
> > +++ b/arch/s390/Kconfig
> > @@ -1,4 +1,7 @@
> >  # SPDX-License-Identifier: GPL-2.0
> > +config ARCH_HAS_MEM_ENCRYPT
> > +        def_bool y
> > +
> >  config MMU
> >  	def_bool y
> >  
> > @@ -191,6 +194,7 @@ config S390
> >  	select ARCH_HAS_SCALED_CPUTIME
> >  	select VIRT_TO_BUS
> >  	select HAVE_NMI
> > +	select SWIOTLB
> >  
> >  
> >  config SCHED_OMIT_FRAME_POINTER
> > diff --git a/arch/s390/include/asm/mem_encrypt.h
> > b/arch/s390/include/asm/mem_encrypt.h new file mode 100644
> > index 000000000000..0898c09a888c
> > --- /dev/null
> > +++ b/arch/s390/include/asm/mem_encrypt.h
> > @@ -0,0 +1,18 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef S390_MEM_ENCRYPT_H__
> > +#define S390_MEM_ENCRYPT_H__
> > +
> > +#ifndef __ASSEMBLY__
> > +
> > +#define sme_me_mask	0ULL
> 
> This is rather ugly, but I understand why it's there
> 

Nod.

> > +
> > +static inline bool sme_active(void) { return false; }
> > +extern bool sev_active(void);
> > +
> > +int set_memory_encrypted(unsigned long addr, int numpages);
> > +int set_memory_decrypted(unsigned long addr, int numpages);
> > +
> > +#endif	/* __ASSEMBLY__ */
> > +
> > +#endif	/* S390_MEM_ENCRYPT_H__ */
> > +
> > diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> > index 3e82f66d5c61..7e3cbd15dcfa 100644
> > --- a/arch/s390/mm/init.c
> > +++ b/arch/s390/mm/init.c
> > @@ -18,6 +18,7 @@
> >  #include <linux/mman.h>
> >  #include <linux/mm.h>
> >  #include <linux/swap.h>
> > +#include <linux/swiotlb.h>
> >  #include <linux/smp.h>
> >  #include <linux/init.h>
> >  #include <linux/pagemap.h>
> > @@ -29,6 +30,7 @@
> >  #include <linux/export.h>
> >  #include <linux/cma.h>
> >  #include <linux/gfp.h>
> > +#include <linux/dma-mapping.h>
> >  #include <asm/processor.h>
> >  #include <linux/uaccess.h>
> >  #include <asm/pgtable.h>
> > @@ -42,6 +44,8 @@
> >  #include <asm/sclp.h>
> >  #include <asm/set_memory.h>
> >  #include <asm/kasan.h>
> > +#include <asm/dma-mapping.h>
> > +#include <asm/uv.h>
> >  
> >  pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
> >  
> > @@ -126,6 +130,50 @@ void mark_rodata_ro(void)
> >  	pr_info("Write protected read-only-after-init data: %luk\n",
> > size >> 10); }
> >  
> > +int set_memory_encrypted(unsigned long addr, int numpages)
> > +{
> > +	int i;
> > +
> > +	/* make all pages shared, (swiotlb, dma_free) */
> 
> this is a copypaste typo, I think? (should be UNshared?)
> also, it doesn't make ALL pages unshared, but only those specified in
> the parameters

Right a copy paste error. Needs correction. The all was meant like all
pages in the range specified by the arguments. But it is better changed
since it turned out to be confusing.

> 
> with this fixed:
> Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
> 

Thanks!

> > +	for (i = 0; i < numpages; ++i) {
> > +		uv_remove_shared(addr);
> > +		addr += PAGE_SIZE;
> > +	}
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> > +
> > +int set_memory_decrypted(unsigned long addr, int numpages)
> > +{
> > +	int i;
> > +	/* make all pages shared (swiotlb, dma_alloca) */
> 
> same here with ALL
> 
> > +	for (i = 0; i < numpages; ++i) {
> > +		uv_set_shared(addr);
> > +		addr += PAGE_SIZE;
> > +	}
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> > +
> > +/* are we a protected virtualization guest? */
> > +bool sev_active(void)
> 
> this is also ugly. the correct solution would be probably to refactor
> everything, including all the AMD SEV code.... let's not go there
> 

Nod. Maybe later.

> > +{
> > +	return is_prot_virt_guest();
> > +}
> > +EXPORT_SYMBOL_GPL(sev_active);
> > +
> > +/* protected virtualization */
> > +static void pv_init(void)
> > +{
> > +	if (!sev_active())
> 
> can't you just use is_prot_virt_guest here?
> 

Sure! I guess it would be less confusing. It is something I did not
remember to change when the interface for this provided by uv.h went
from sketchy to nice.

Thanks again!

Regards,
Halil

> > +		return;
> > +
> > +	/* make sure bounce buffers are shared */
> > +	swiotlb_init(1);
> > +	swiotlb_update_mem_attributes();
> > +	swiotlb_force = SWIOTLB_FORCE;
> > +}
> > +
> >  void __init mem_init(void)
> >  {
> >  	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> > @@ -134,6 +182,8 @@ void __init mem_init(void)
> >  	set_max_mapnr(max_low_pfn);
> >          high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> >  
> > +	pv_init();
> > +
> >  	/* Setup guest page hinting */
> >  	cmma_init();
> >  
>
Claudio Imbrenda May 10, 2019, 7:49 a.m. UTC | #7
On Thu, 9 May 2019 14:05:20 -0400
"Jason J. Herne" <jjherne@linux.ibm.com> wrote:

[...]

> > +#define sme_me_mask    0ULL
> > +
> > +static inline bool sme_active(void) { return false; }
> > +extern bool sev_active(void);
> > +  
> 
> I noticed this patch always returns false for sme_active. Is it safe
> to assume that whatever fixups are required on x86 to deal with sme
> do not apply to s390?

yes, on x86 sev_active returns false if SEV is enabled. SME is for
host memory encryption. from arch/x86/mm/mem_encrypt.c:

bool sme_active(void)
{
        return sme_me_mask && !sev_enabled;
}

and it makes sense because you can't have both SME and SEV enabled on
the same kernel, because either you're running on bare metal (and then
you can have SME) __or__ you are running as a guest (and then you can
have SEV). The key difference is that DMA operations don't need
bounce buffers with SME, but they do with SEV.

I hope this clarifies your doubts :)

[...]
Michael Mueller May 13, 2019, 12:50 p.m. UTC | #8
On 29.04.19 16:05, Christian Borntraeger wrote:
> 
> 
> On 29.04.19 15:59, Halil Pasic wrote:
>> On Fri, 26 Apr 2019 12:27:11 -0700
>> Christoph Hellwig <hch@infradead.org> wrote:
>>
>>> On Fri, Apr 26, 2019 at 08:32:39PM +0200, Halil Pasic wrote:
>>>> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
>>>
>>>> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
>>>
>>>> +EXPORT_SYMBOL_GPL(sev_active);
>>>
>>> Why do you export these?  I know x86 exports those as well, but
>>> it shoudn't be needed there either.
>>>
>>
>> I export these to be in line with the x86 implementation (which
>> is the original and seems to be the only one at the moment). I assumed
>> that 'exported or not' is kind of a part of the interface definition.
>> Honestly, I did not give it too much thought.
>>
>> For x86 set_memory(en|de)crypted got exported by 95cf9264d5f3 "x86, drm,
>> fbdev: Do not specify encrypted memory for video mappings" (Tom
>> Lendacky, 2017-07-17). With CONFIG_FB_VGA16=m seems to be necessary for x84.
>>
>> If the consensus is don't export: I won't. I'm fine one way or the other.
>> @Christian, what is your take on this?
> 
> If we do not need it today for anything (e.g. virtio-gpu) then we can get rid
> of the exports (and introduce them when necessary).

I'll take them out then.

>>
>> Thank you very much!
>>
>> Regards,
>> Halil
>>
>>
>
Michael Mueller May 15, 2019, 2:15 p.m. UTC | #9
On 10.05.19 00:34, Halil Pasic wrote:
> On Wed, 8 May 2019 15:15:40 +0200
> Claudio Imbrenda <imbrenda@linux.ibm.com> wrote:
> 
>> On Fri, 26 Apr 2019 20:32:39 +0200
>> Halil Pasic <pasic@linux.ibm.com> wrote:
>>
>>> On s390, protected virtualization guests have to use bounced I/O
>>> buffers.  That requires some plumbing.
>>>
>>> Let us make sure, any device that uses DMA API with direct ops
>>> correctly is spared from the problems, that a hypervisor attempting
>>> I/O to a non-shared page would bring.
>>>
>>> Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
>>> ---
>>>   arch/s390/Kconfig                   |  4 +++
>>>   arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++
>>>   arch/s390/mm/init.c                 | 50
>>> +++++++++++++++++++++++++++++++++++++ 3 files changed, 72
>>> insertions(+) create mode 100644 arch/s390/include/asm/mem_encrypt.h
>>>
>>> diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
>>> index 1c3fcf19c3af..5500d05d4d53 100644
>>> --- a/arch/s390/Kconfig
>>> +++ b/arch/s390/Kconfig
>>> @@ -1,4 +1,7 @@
>>>   # SPDX-License-Identifier: GPL-2.0
>>> +config ARCH_HAS_MEM_ENCRYPT
>>> +        def_bool y
>>> +
>>>   config MMU
>>>   	def_bool y
>>>   
>>> @@ -191,6 +194,7 @@ config S390
>>>   	select ARCH_HAS_SCALED_CPUTIME
>>>   	select VIRT_TO_BUS
>>>   	select HAVE_NMI
>>> +	select SWIOTLB
>>>   
>>>   
>>>   config SCHED_OMIT_FRAME_POINTER
>>> diff --git a/arch/s390/include/asm/mem_encrypt.h
>>> b/arch/s390/include/asm/mem_encrypt.h new file mode 100644
>>> index 000000000000..0898c09a888c
>>> --- /dev/null
>>> +++ b/arch/s390/include/asm/mem_encrypt.h
>>> @@ -0,0 +1,18 @@
>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>> +#ifndef S390_MEM_ENCRYPT_H__
>>> +#define S390_MEM_ENCRYPT_H__
>>> +
>>> +#ifndef __ASSEMBLY__
>>> +
>>> +#define sme_me_mask	0ULL
>>
>> This is rather ugly, but I understand why it's there
>>
> 
> Nod.
> 
>>> +
>>> +static inline bool sme_active(void) { return false; }
>>> +extern bool sev_active(void);
>>> +
>>> +int set_memory_encrypted(unsigned long addr, int numpages);
>>> +int set_memory_decrypted(unsigned long addr, int numpages);
>>> +
>>> +#endif	/* __ASSEMBLY__ */
>>> +
>>> +#endif	/* S390_MEM_ENCRYPT_H__ */
>>> +
>>> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
>>> index 3e82f66d5c61..7e3cbd15dcfa 100644
>>> --- a/arch/s390/mm/init.c
>>> +++ b/arch/s390/mm/init.c
>>> @@ -18,6 +18,7 @@
>>>   #include <linux/mman.h>
>>>   #include <linux/mm.h>
>>>   #include <linux/swap.h>
>>> +#include <linux/swiotlb.h>
>>>   #include <linux/smp.h>
>>>   #include <linux/init.h>
>>>   #include <linux/pagemap.h>
>>> @@ -29,6 +30,7 @@
>>>   #include <linux/export.h>
>>>   #include <linux/cma.h>
>>>   #include <linux/gfp.h>
>>> +#include <linux/dma-mapping.h>
>>>   #include <asm/processor.h>
>>>   #include <linux/uaccess.h>
>>>   #include <asm/pgtable.h>
>>> @@ -42,6 +44,8 @@
>>>   #include <asm/sclp.h>
>>>   #include <asm/set_memory.h>
>>>   #include <asm/kasan.h>
>>> +#include <asm/dma-mapping.h>
>>> +#include <asm/uv.h>
>>>   
>>>   pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
>>>   
>>> @@ -126,6 +130,50 @@ void mark_rodata_ro(void)
>>>   	pr_info("Write protected read-only-after-init data: %luk\n",
>>> size >> 10); }
>>>   
>>> +int set_memory_encrypted(unsigned long addr, int numpages)
>>> +{
>>> +	int i;
>>> +
>>> +	/* make all pages shared, (swiotlb, dma_free) */
>>
>> this is a copypaste typo, I think? (should be UNshared?)
>> also, it doesn't make ALL pages unshared, but only those specified in
>> the parameters
> 
> Right a copy paste error. Needs correction. The all was meant like all
> pages in the range specified by the arguments. But it is better changed
> since it turned out to be confusing.
> 
>>
>> with this fixed:
>> Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
>>
> 
> Thanks!
> 
>>> +	for (i = 0; i < numpages; ++i) {
>>> +		uv_remove_shared(addr);
>>> +		addr += PAGE_SIZE;
>>> +	}
>>> +	return 0;
>>> +}
>>> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
>>> +
>>> +int set_memory_decrypted(unsigned long addr, int numpages)
>>> +{
>>> +	int i;
>>> +	/* make all pages shared (swiotlb, dma_alloca) */
>>
>> same here with ALL
>>
>>> +	for (i = 0; i < numpages; ++i) {
>>> +		uv_set_shared(addr);
>>> +		addr += PAGE_SIZE;
>>> +	}
>>> +	return 0;
>>> +}
>>> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
>>> +
>>> +/* are we a protected virtualization guest? */
>>> +bool sev_active(void)
>>
>> this is also ugly. the correct solution would be probably to refactor
>> everything, including all the AMD SEV code.... let's not go there
>>
> 
> Nod. Maybe later.
> 
>>> +{
>>> +	return is_prot_virt_guest();
>>> +}
>>> +EXPORT_SYMBOL_GPL(sev_active);
>>> +
>>> +/* protected virtualization */
>>> +static void pv_init(void)
>>> +{
>>> +	if (!sev_active())
>>
>> can't you just use is_prot_virt_guest here?
>>
> 
> Sure! I guess it would be less confusing. It is something I did not
> remember to change when the interface for this provided by uv.h went
> from sketchy to nice.

integrated in v2

Michael

> 
> Thanks again!
> 
> Regards,
> Halil
> 
>>> +		return;
>>> +
>>> +	/* make sure bounce buffers are shared */
>>> +	swiotlb_init(1);
>>> +	swiotlb_update_mem_attributes();
>>> +	swiotlb_force = SWIOTLB_FORCE;
>>> +}
>>> +
>>>   void __init mem_init(void)
>>>   {
>>>   	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
>>> @@ -134,6 +182,8 @@ void __init mem_init(void)
>>>   	set_max_mapnr(max_low_pfn);
>>>           high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>>>   
>>> +	pv_init();
>>> +
>>>   	/* Setup guest page hinting */
>>>   	cmma_init();
>>>   
>>
>

Patch
diff mbox series

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1c3fcf19c3af..5500d05d4d53 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,4 +1,7 @@ 
 # SPDX-License-Identifier: GPL-2.0
+config ARCH_HAS_MEM_ENCRYPT
+        def_bool y
+
 config MMU
 	def_bool y
 
@@ -191,6 +194,7 @@  config S390
 	select ARCH_HAS_SCALED_CPUTIME
 	select VIRT_TO_BUS
 	select HAVE_NMI
+	select SWIOTLB
 
 
 config SCHED_OMIT_FRAME_POINTER
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
new file mode 100644
index 000000000000..0898c09a888c
--- /dev/null
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask	0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3e82f66d5c61..7e3cbd15dcfa 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@ 
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/swiotlb.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
@@ -29,6 +30,7 @@ 
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
+#include <linux/dma-mapping.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -42,6 +44,8 @@ 
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
 #include <asm/kasan.h>
+#include <asm/dma-mapping.h>
+#include <asm/uv.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -126,6 +130,50 @@  void mark_rodata_ro(void)
 	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }
 
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+	int i;
+
+	/* make all pages shared, (swiotlb, dma_free) */
+	for (i = 0; i < numpages; ++i) {
+		uv_remove_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_encrypted);
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+	int i;
+	/* make all pages shared (swiotlb, dma_alloca) */
+	for (i = 0; i < numpages; ++i) {
+		uv_set_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_decrypted);
+
+/* are we a protected virtualization guest? */
+bool sev_active(void)
+{
+	return is_prot_virt_guest();
+}
+EXPORT_SYMBOL_GPL(sev_active);
+
+/* protected virtualization */
+static void pv_init(void)
+{
+	if (!sev_active())
+		return;
+
+	/* make sure bounce buffers are shared */
+	swiotlb_init(1);
+	swiotlb_update_mem_attributes();
+	swiotlb_force = SWIOTLB_FORCE;
+}
+
 void __init mem_init(void)
 {
 	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -134,6 +182,8 @@  void __init mem_init(void)
 	set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
+	pv_init();
+
 	/* Setup guest page hinting */
 	cmma_init();