
mini-os: correct memory access rights for pvh mode

Message ID 20200815111557.29564-1-jgross@suse.com (mailing list archive)
State New, archived
Series mini-os: correct memory access rights for pvh mode

Commit Message

Jürgen Groß Aug. 15, 2020, 11:15 a.m. UTC
When running as a PVH guest, the memory access rights are not set
correctly: _PAGE_USER should not be set, and CR0.WP should be set.
CR0.WP in particular is important for the allocate-on-demand feature
to work, as it relies on a page fault being raised when writing to a
read-only page.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/x86/x86_hvm.S    |  2 +-
 include/x86/arch_mm.h | 18 ++++++++++++------
 include/x86/os.h      |  1 +
 3 files changed, 14 insertions(+), 7 deletions(-)
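
For illustration only (not part of the patch): a minimal sketch, in C, of the
kind of write-fault handling that allocate-on-demand depends on once CR0.WP is
set. With WP set, a supervisor-mode write to a read-only placeholder mapping
faults, so the kernel can allocate a real frame and remap the page read-write.
The helper names below are hypothetical, not Mini-OS functions.

    #include <stdint.h>
    #include <stdbool.h>

    #define PF_WRITE 0x2  /* page-fault error code bit 1: fault was a write */

    /* Hypothetical helpers assumed to be provided by the surrounding kernel. */
    extern bool is_demand_alloc_page(uintptr_t addr);
    extern uintptr_t alloc_frame(void);
    extern void map_page_rw(uintptr_t vaddr, uintptr_t frame);

    /* Called from the page-fault path with the faulting address (CR2) and
     * the hardware error code; returns true if the fault was handled. */
    bool handle_demand_alloc_fault(uintptr_t cr2, uint32_t error_code)
    {
        if (!(error_code & PF_WRITE) || !is_demand_alloc_page(cr2))
            return false;            /* not ours: use the normal handler */

        map_page_rw(cr2 & ~0xfffUL, alloc_frame());
        return true;                 /* retry the faulting instruction */
    }

Without CR0.WP, such a supervisor-mode write would silently succeed and the
fault would never be raised, which is the problem the patch fixes for PVH.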

Comments

Samuel Thibault Aug. 15, 2020, 9:40 p.m. UTC | #1
Juergen Gross, on Sat, Aug 15, 2020 at 13:15:57 +0200, wrote:
> When running as a PVH guest, the memory access rights are not set
> correctly: _PAGE_USER should not be set, and CR0.WP should be set.
> CR0.WP in particular is important for the allocate-on-demand feature
> to work, as it relies on a page fault being raised when writing to a
> read-only page.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>

> ---
>  arch/x86/x86_hvm.S    |  2 +-
>  include/x86/arch_mm.h | 18 ++++++++++++------
>  include/x86/os.h      |  1 +
>  3 files changed, 14 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/x86_hvm.S b/arch/x86/x86_hvm.S
> index 6e8ad98..42a5f02 100644
> --- a/arch/x86/x86_hvm.S
> +++ b/arch/x86/x86_hvm.S
> @@ -20,7 +20,7 @@ _start:
>  #endif /* __x86_64__ */
>  
>          mov %cr0, %eax
> -        or $X86_CR0_PG, %eax
> +        or $(X86_CR0_PG | X86_CR0_WP), %eax
>          mov %eax, %cr0
>  
>          lgdt gdt_ptr
> diff --git a/include/x86/arch_mm.h b/include/x86/arch_mm.h
> index cbbeb21..ffbec5a 100644
> --- a/include/x86/arch_mm.h
> +++ b/include/x86/arch_mm.h
> @@ -171,17 +171,23 @@ typedef unsigned long pgentry_t;
>  #define _PAGE_PSE      CONST(0x080)
>  #define _PAGE_GLOBAL   CONST(0x100)
>  
> +#ifdef CONFIG_PARAVIRT
> +#define PAGE_USER _PAGE_USER
> +#else
> +#define PAGE_USER CONST(0)
> +#endif
> +
>  #if defined(__i386__)
>  #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
>  #define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED)
> -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
> +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |PAGE_USER)
>  #define L3_PROT (_PAGE_PRESENT)
>  #elif defined(__x86_64__)
> -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
> -#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_USER)
> -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
> -#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
> -#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
> +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|PAGE_USER)
> +#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|PAGE_USER)
> +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|PAGE_USER)
> +#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|PAGE_USER)
> +#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|PAGE_USER)
>  #endif /* __i386__ || __x86_64__ */
>  
>  /* flags for ioremap */
> diff --git a/include/x86/os.h b/include/x86/os.h
> index a73b63e..46a824d 100644
> --- a/include/x86/os.h
> +++ b/include/x86/os.h
> @@ -27,6 +27,7 @@
>  #define MSR_EFER          0xc0000080
>  #define _EFER_LME         8             /* Long mode enable */
>  
> +#define X86_CR0_WP        0x00010000    /* Write protect */
>  #define X86_CR0_PG        0x80000000    /* Paging */
>  #define X86_CR4_PAE       0x00000020    /* enable physical address extensions */
>  #define X86_CR4_OSFXSR    0x00000200    /* enable fast FPU save and restore */
> -- 
> 2.26.2
>
Wei Liu Aug. 17, 2020, 9:56 a.m. UTC | #2
On Sat, Aug 15, 2020 at 11:40:02PM +0200, Samuel Thibault wrote:
> Juergen Gross, on Sat, Aug 15, 2020 at 13:15:57 +0200, wrote:
> > When running as a PVH guest, the memory access rights are not set
> > correctly: _PAGE_USER should not be set, and CR0.WP should be set.
> > CR0.WP in particular is important for the allocate-on-demand feature
> > to work, as it relies on a page fault being raised when writing to a
> > read-only page.
> > 
> > Signed-off-by: Juergen Gross <jgross@suse.com>
> 
> Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>

Applied.

Patch

diff --git a/arch/x86/x86_hvm.S b/arch/x86/x86_hvm.S
index 6e8ad98..42a5f02 100644
--- a/arch/x86/x86_hvm.S
+++ b/arch/x86/x86_hvm.S
@@ -20,7 +20,7 @@ _start:
 #endif /* __x86_64__ */
 
         mov %cr0, %eax
-        or $X86_CR0_PG, %eax
+        or $(X86_CR0_PG | X86_CR0_WP), %eax
         mov %eax, %cr0
 
         lgdt gdt_ptr
diff --git a/include/x86/arch_mm.h b/include/x86/arch_mm.h
index cbbeb21..ffbec5a 100644
--- a/include/x86/arch_mm.h
+++ b/include/x86/arch_mm.h
@@ -171,17 +171,23 @@ typedef unsigned long pgentry_t;
 #define _PAGE_PSE      CONST(0x080)
 #define _PAGE_GLOBAL   CONST(0x100)
 
+#ifdef CONFIG_PARAVIRT
+#define PAGE_USER _PAGE_USER
+#else
+#define PAGE_USER CONST(0)
+#endif
+
 #if defined(__i386__)
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
 #define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |PAGE_USER)
 #define L3_PROT (_PAGE_PRESENT)
 #elif defined(__x86_64__)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
-#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_USER)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|PAGE_USER)
+#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|PAGE_USER)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|PAGE_USER)
+#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|PAGE_USER)
+#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|PAGE_USER)
 #endif /* __i386__ || __x86_64__ */
 
 /* flags for ioremap */
diff --git a/include/x86/os.h b/include/x86/os.h
index a73b63e..46a824d 100644
--- a/include/x86/os.h
+++ b/include/x86/os.h
@@ -27,6 +27,7 @@
 #define MSR_EFER          0xc0000080
 #define _EFER_LME         8             /* Long mode enable */
 
+#define X86_CR0_WP        0x00010000    /* Write protect */
 #define X86_CR0_PG        0x80000000    /* Paging */
 #define X86_CR4_PAE       0x00000020    /* enable physical address extensions */
 #define X86_CR4_OSFXSR    0x00000200    /* enable fast FPU save and restore */
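
For reference only (not part of the patch): the CR0 update done in x86_hvm.S,
expressed as a C sketch with GCC inline assembly, using the X86_CR0_* values
from os.h. The function name is made up for this illustration.

    #define X86_CR0_WP 0x00010000UL   /* Write protect */
    #define X86_CR0_PG 0x80000000UL   /* Paging */

    /* Read CR0, set the paging and write-protect bits, and write it back. */
    static inline void enable_paging_and_wp(void)
    {
        unsigned long cr0;

        __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0));
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");
    }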