[v2,3/3] x86: Make the GDT remapping read-only on 64 bit

Message ID 20170126165940.30799-3-thgarnie@google.com (mailing list archive)
State New, archived

Commit Message

Thomas Garnier Jan. 26, 2017, 4:59 p.m. UTC
This patch makes the GDT remapped pages read-only to prevent corruption.
This change is done only on 64 bit.

The native_load_tr_desc function was adapted to correctly handle a
read-only GDT. The LTR instruction always writes to the GDT TSS entry.
This generates a page fault if the GDT is read-only. This change checks
if the current GDT is a remap and swaps GDTs as needed. This function was
tested by booting multiple machines and checking that hibernation works
properly.

KVM's SVM and VMX code was adapted to use the writeable GDT. On VMX, the
host_gdt per-cpu variable was removed in favor of functions that fetch the
original GDT.
Instead of reloading the previous GDT, VMX will reload the fixmap GDT as
expected. For testing, VMs were started and restored on multiple
configurations.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
---
Based on next-20170125
---
 arch/x86/include/asm/desc.h      | 46 +++++++++++++++++++++++++++++++++++-----
 arch/x86/include/asm/processor.h |  1 +
 arch/x86/kernel/cpu/common.c     | 28 ++++++++++++++++++------
 arch/x86/kvm/svm.c               |  4 +---
 arch/x86/kvm/vmx.c               | 15 +++++--------
 5 files changed, 70 insertions(+), 24 deletions(-)

Comments

Ingo Molnar Feb. 1, 2017, 9:15 a.m. UTC | #1
* Thomas Garnier <thgarnie@google.com> wrote:

> This patch makes the GDT remapped pages read-only to prevent corruption.
> This change is done only on 64 bit.

Please spell '64-bit' consistently through the series. I've seen two variants:

  64 bit
  64bit

> +/*
> + * The LTR instruction marks the TSS GDT entry as busy. In 64bit, the GDT is
> + * a read-only remapping. To prevent a page fault, the GDT is switched to the
> + * original writeable version when needed.

s/In 64bit,
 /On 64-bit kernels,

> + */
> +#ifdef CONFIG_X86_64
> +static inline void native_load_tr_desc(void)
> +{
> +	struct desc_ptr gdt;
> +	int cpu = raw_smp_processor_id();
> +	bool restore = false;
> +	struct desc_struct *fixmap_gdt;
> +
> +	native_store_gdt(&gdt);
> +	fixmap_gdt = get_cpu_fixmap_gdt(cpu);
> +
> +	/*
> +	 * If the current GDT is the read-only fixmap, swap to the original
> +	 * writeable version. Swap back at the end.
> +	 */
> +	if (gdt.address == (unsigned long)fixmap_gdt) {
> +		load_direct_gdt(cpu);
> +		restore = true;
> +	}
> +	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
> +	if (restore)
> +		load_fixmap_gdt(cpu);

Please use bool plus 0/1; it's more readable (to me) than the true/false notation.

>  extern void switch_to_new_gdt(int);
> +extern void load_direct_gdt(int);
>  extern void load_fixmap_gdt(int);

> +/* Load the original GDT from the per-cpu structure */
> +void load_direct_gdt(int cpu)
> +{
> +	struct desc_ptr gdt_descr;
> +
> +	gdt_descr.address = (long)get_cpu_direct_gdt(cpu);

Please name the functions in an easier-to-understand way, such as:

	get_cpu_gdt_rw()
	get_cpu_gdt_ro()

That the GDT is in the direct mappings is less important than the fact that the
address is writable ...
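
For illustration, the renamed accessors might look something like this (a
sketch only, reusing the per_cpu gdt_page layout from this series and the
fixmap index helper from patch 2):

	/* Writable GDT from the per-cpu direct mapping */
	static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
	{
		return per_cpu(gdt_page, cpu).gdt;
	}

	/* Read-only fixmap remapping of the same GDT */
	static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
	{
		return (struct desc_struct *)__fix_to_virt(get_cpu_fixmap_gdt_index(cpu));
	}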

> +}
> +EXPORT_SYMBOL(load_direct_gdt);

EXPORT_SYMBOL_GPL(), or no export at all.

> +EXPORT_SYMBOL(load_fixmap_gdt);

ditto.

>  	 * VT restores TR but not its size.  Useless.
>  	 */
> -	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
>  	struct desc_struct *descs;
>  
> -	descs = (void *)gdt->address;
> +	descs = (void *)get_current_direct_gdt();

Couldn't the type cast be dropped?
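
It looks like it could: get_current_direct_gdt() already returns
struct desc_struct *, so plain assignment should compile unchanged:

	descs = get_current_direct_gdt();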

>  
> -	table_base = gdt->address;
> +	table_base = (unsigned long)get_current_direct_gdt();

Instead of spreading these type casts far and wide, please introduce another
accessor that returns 'unsigned long':

	get_cpu_gdt_rw_vaddr()

or such.
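
A minimal sketch of such an accessor, assuming the rw/ro naming suggested
above:

	static inline unsigned long get_cpu_gdt_rw_vaddr(unsigned int cpu)
	{
		return (unsigned long)get_cpu_gdt_rw(cpu);
	}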

Thanks,

	Ingo
Andy Lutomirski Feb. 2, 2017, 5:13 a.m. UTC | #2
On Wed, Feb 1, 2017 at 1:15 AM, Ingo Molnar <mingo@kernel.org> wrote:
>
> * Thomas Garnier <thgarnie@google.com> wrote:
>
>> This patch makes the GDT remapped pages read-only to prevent corruption.
>> This change is done only on 64 bit.
>


>>
>> -     table_base = gdt->address;
>> +     table_base = (unsigned long)get_current_direct_gdt();
>
> Instead of spreading these type casts far and wide, please introduce another
> accessor that returns 'unsigned long':
>
>         get_cpu_gdt_rw_vaddr()
>

That whole function is an abomination.  How about replacing 'unsigned
long table_base' with 'struct desc_struct *table'?  If you're feeling
really adventurous, *delete* that function and replace all of its
users with something sane.
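
For the less adventurous option, a sketch of what the pointer-based version
might look like (simplified: it keeps the recursive LDT lookup but omits the
16-byte system-descriptor handling a complete 64-bit version needs):

	static unsigned long segment_base(u16 selector)
	{
		struct desc_struct *table = get_current_direct_gdt();

		if (!(selector & ~3))
			return 0;

		if (selector & 4) {	/* from ldt */
			u16 ldt_selector = kvm_read_ldt();

			if (!(ldt_selector & ~3))
				return 0;

			table = (struct desc_struct *)segment_base(ldt_selector);
		}
		return get_desc_base(&table[selector >> 3]);
	}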

--Andy
Andy Lutomirski Feb. 2, 2017, 5:14 a.m. UTC | #3
On Thu, Jan 26, 2017 at 8:59 AM, Thomas Garnier <thgarnie@google.com> wrote:
> This patch makes the GDT remapped pages read-only to prevent corruption.
> This change is done only on 64 bit.
>
> The native_load_tr_desc function was adapted to correctly handle a
> read-only GDT. The LTR instruction always writes to the GDT TSS entry.
> This generates a page fault if the GDT is read-only. This change checks
> if the current GDT is a remap and swaps GDTs as needed. This function was
> tested by booting multiple machines and checking that hibernation works
> properly.
>
> KVM's SVM and VMX code was adapted to use the writeable GDT. On VMX, the
> host_gdt per-cpu variable was removed in favor of functions that fetch the
> original GDT.
> Instead of reloading the previous GDT, VMX will reload the fixmap GDT as
> expected. For testing, VMs were started and restored on multiple
> configurations.
>
> Signed-off-by: Thomas Garnier <thgarnie@google.com>
> ---
> Based on next-20170125
> ---
>  arch/x86/include/asm/desc.h      | 46 +++++++++++++++++++++++++++++++++++-----
>  arch/x86/include/asm/processor.h |  1 +
>  arch/x86/kernel/cpu/common.c     | 28 ++++++++++++++++++------
>  arch/x86/kvm/svm.c               |  4 +---
>  arch/x86/kvm/vmx.c               | 15 +++++--------
>  5 files changed, 70 insertions(+), 24 deletions(-)
>
> diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
> index 4cc176f57b78..ca7b2224fcb4 100644
> --- a/arch/x86/include/asm/desc.h
> +++ b/arch/x86/include/asm/desc.h
> @@ -52,6 +52,12 @@ static inline struct desc_struct *get_cpu_direct_gdt(unsigned int cpu)
>         return per_cpu(gdt_page, cpu).gdt;
>  }
>
> +/* Provide the current original GDT */
> +static inline struct desc_struct *get_current_direct_gdt(void)
> +{
> +       return this_cpu_ptr(&gdt_page)->gdt;
> +}

I'm assuming the reason this isn't part of patch 2, used there instead of
the version that takes cpu as a parameter, is that TLS doesn't work until
the GDT is set up.  If so, perhaps that's worthy of a comment in patch 2.

But give this_cpu_read(gdt_page.gdt) a try, please.

> +/*
> + * The LTR instruction marks the TSS GDT entry as busy. In 64bit, the GDT is
> + * a read-only remapping. To prevent a page fault, the GDT is switched to the
> + * original writeable version when needed.
> + */
> +#ifdef CONFIG_X86_64
> +static inline void native_load_tr_desc(void)
> +{
> +       struct desc_ptr gdt;
> +       int cpu = raw_smp_processor_id();
> +       bool restore = false;
> +       struct desc_struct *fixmap_gdt;
> +
> +       native_store_gdt(&gdt);

Off the top of my head, this is something like 10 cycles.  IMO that's
fast enough not to worry about the regression this will cause to KVM
exits.  In any event, we'll get that back and *much* more when we do
the optimizations that this series enables.
Ingo Molnar Feb. 2, 2017, 7:12 a.m. UTC | #4
* Andy Lutomirski <luto@kernel.org> wrote:

> On Wed, Feb 1, 2017 at 1:15 AM, Ingo Molnar <mingo@kernel.org> wrote:
> >
> > * Thomas Garnier <thgarnie@google.com> wrote:
> >
> >> This patch makes the GDT remapped pages read-only to prevent corruption.
> >> This change is done only on 64 bit.
> >
> 
> 
> >>
> >> -     table_base = gdt->address;
> >> +     table_base = (unsigned long)get_current_direct_gdt();
> >
> > Instead of spreading these type casts far and wide, please introduce another
> > accessor that returns 'unsigned long':
> >
> >         get_cpu_gdt_rw_vaddr()
> >
> 
> That whole function is an abomination.  How about replacing 'unsigned
> long table_base' with 'struct desc_struct *table'?  If you're feeling
> really adventurous, *delete* that function and replace all of its
> users with something sane.

Yeah, even better!

Thanks,

	Ingo
Thomas Garnier Feb. 6, 2017, 10:10 p.m. UTC | #5
On Wed, Feb 1, 2017 at 9:14 PM, Andy Lutomirski <luto@kernel.org> wrote:
> On Thu, Jan 26, 2017 at 8:59 AM, Thomas Garnier <thgarnie@google.com> wrote:
>> This patch makes the GDT remapped pages read-only to prevent corruption.
>> This change is done only on 64 bit.
>>
>> The native_load_tr_desc function was adapted to correctly handle a
>> read-only GDT. The LTR instruction always writes to the GDT TSS entry.
>> This generates a page fault if the GDT is read-only. This change checks
>> if the current GDT is a remap and swaps GDTs as needed. This function was
>> tested by booting multiple machines and checking that hibernation works
>> properly.
>>
>> KVM's SVM and VMX code was adapted to use the writeable GDT. On VMX, the
>> host_gdt per-cpu variable was removed in favor of functions that fetch the
>> original GDT.
>> Instead of reloading the previous GDT, VMX will reload the fixmap GDT as
>> expected. For testing, VMs were started and restored on multiple
>> configurations.
>>
>> Signed-off-by: Thomas Garnier <thgarnie@google.com>
>> ---
>> Based on next-20170125
>> ---
>>  arch/x86/include/asm/desc.h      | 46 +++++++++++++++++++++++++++++++++++-----
>>  arch/x86/include/asm/processor.h |  1 +
>>  arch/x86/kernel/cpu/common.c     | 28 ++++++++++++++++++------
>>  arch/x86/kvm/svm.c               |  4 +---
>>  arch/x86/kvm/vmx.c               | 15 +++++--------
>>  5 files changed, 70 insertions(+), 24 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
>> index 4cc176f57b78..ca7b2224fcb4 100644
>> --- a/arch/x86/include/asm/desc.h
>> +++ b/arch/x86/include/asm/desc.h
>> @@ -52,6 +52,12 @@ static inline struct desc_struct *get_cpu_direct_gdt(unsigned int cpu)
>>         return per_cpu(gdt_page, cpu).gdt;
>>  }
>>
>> +/* Provide the current original GDT */
>> +static inline struct desc_struct *get_current_direct_gdt(void)
>> +{
>> +       return this_cpu_ptr(&gdt_page)->gdt;
>> +}
>
> I'm assuming the reason this isn't part of patch 2, used there instead of
> the version that takes cpu as a parameter, is that TLS doesn't work until
> the GDT is set up.  If so, perhaps that's worthy of a comment in patch 2.
>
> But give this_cpu_read(gdt_page.gdt) a try, please.
>

I tried but I can't get it working properly because the gdt field is
an array, not a pointer. For example with this_cpu_read(gdt_page.gdt),
I get:

./arch/x86/include/asm/desc.h: In function ‘get_current_gdt_rw’:
./include/linux/percpu-defs.h:308:21: error: incompatible types when
assigning to type ‘struct desc_struct[16]’ from type ‘struct
desc_struct *’
  case 1: pscr_ret__ = stem##1(variable); break;   \
                     ^
I tried different variants without success. What do you think?
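
For reference, the error is consistent with how the percpu accessors are
built: this_cpu_read() expands to a switch over sizeof() with only 1-, 2-,
4- and 8-byte cases (the pscr_ret__ switch quoted above), so an array field
can never match, while this_cpu_ptr() only needs the variable's address:

	this_cpu_read(gdt_page.gdt);	/* array: no sizeof() case matches */
	this_cpu_ptr(&gdt_page)->gdt;	/* ok: address-of, then member access */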

>> +/*
>> + * The LTR instruction marks the TSS GDT entry as busy. In 64bit, the GDT is
>> + * a read-only remapping. To prevent a page fault, the GDT is switched to the
>> + * original writeable version when needed.
>> + */
>> +#ifdef CONFIG_X86_64
>> +static inline void native_load_tr_desc(void)
>> +{
>> +       struct desc_ptr gdt;
>> +       int cpu = raw_smp_processor_id();
>> +       bool restore = false;
>> +       struct desc_struct *fixmap_gdt;
>> +
>> +       native_store_gdt(&gdt);
>
> Off the top of my head, this is something like 10 cycles.  IMO that's
> fast enough not to worry about the regression this will cause to KVM
> exits.  In any event, we'll get that back and *much* more when we do
> the optimizations that this series enables.

Patch

diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 4cc176f57b78..ca7b2224fcb4 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -52,6 +52,12 @@  static inline struct desc_struct *get_cpu_direct_gdt(unsigned int cpu)
 	return per_cpu(gdt_page, cpu).gdt;
 }
 
+/* Provide the current original GDT */
+static inline struct desc_struct *get_current_direct_gdt(void)
+{
+	return this_cpu_ptr(&gdt_page)->gdt;
+}
+
 /* Get the fixmap index for a specific processor */
 static inline unsigned int get_cpu_fixmap_gdt_index(int cpu)
 {
@@ -223,11 +229,6 @@  static inline void native_set_ldt(const void *addr, unsigned int entries)
 	}
 }
 
-static inline void native_load_tr_desc(void)
-{
-	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
-}
-
 static inline void native_load_gdt(const struct desc_ptr *dtr)
 {
 	asm volatile("lgdt %0"::"m" (*dtr));
@@ -248,6 +249,41 @@  static inline void native_store_idt(struct desc_ptr *dtr)
 	asm volatile("sidt %0":"=m" (*dtr));
 }
 
+/*
+ * The LTR instruction marks the TSS GDT entry as busy. In 64bit, the GDT is
+ * a read-only remapping. To prevent a page fault, the GDT is switched to the
+ * original writeable version when needed.
+ */
+#ifdef CONFIG_X86_64
+static inline void native_load_tr_desc(void)
+{
+	struct desc_ptr gdt;
+	int cpu = raw_smp_processor_id();
+	bool restore = false;
+	struct desc_struct *fixmap_gdt;
+
+	native_store_gdt(&gdt);
+	fixmap_gdt = get_cpu_fixmap_gdt(cpu);
+
+	/*
+	 * If the current GDT is the read-only fixmap, swap to the original
+	 * writeable version. Swap back at the end.
+	 */
+	if (gdt.address == (unsigned long)fixmap_gdt) {
+		load_direct_gdt(cpu);
+		restore = true;
+	}
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+	if (restore)
+		load_fixmap_gdt(cpu);
+}
+#else
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+#endif
+
 static inline unsigned long native_store_tr(void)
 {
 	unsigned long tr;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 22801fd345dc..e8e68b00a2ec 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -705,6 +705,7 @@  extern struct desc_ptr		early_gdt_descr;
 
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(int);
+extern void load_direct_gdt(int);
 extern void load_fixmap_gdt(int);
 extern void load_percpu_segment(int);
 extern void cpu_init(void);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 15f06cf3e3d4..a7a54f57b68a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -444,13 +444,31 @@  void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
+/* On 64bit the GDT remapping is read-only */
+#ifdef CONFIG_X86_64
+#define PAGE_FIXMAP_GDT PAGE_KERNEL_RO
+#else
+#define PAGE_FIXMAP_GDT PAGE_KERNEL
+#endif
+
 /* Setup the fixmap mapping only once per-processor */
 static inline void setup_fixmap_gdt(int cpu)
 {
 	__set_fixmap(get_cpu_fixmap_gdt_index(cpu),
-		     __pa(get_cpu_direct_gdt(cpu)), PAGE_KERNEL);
+		     __pa(get_cpu_direct_gdt(cpu)), PAGE_FIXMAP_GDT);
 }
 
+/* Load the original GDT from the per-cpu structure */
+void load_direct_gdt(int cpu)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_direct_gdt(cpu);
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+}
+EXPORT_SYMBOL(load_direct_gdt);
+
 /* Load a fixmap remapping of the per-cpu GDT */
 void load_fixmap_gdt(int cpu)
 {
@@ -460,6 +478,7 @@  void load_fixmap_gdt(int cpu)
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 }
+EXPORT_SYMBOL(load_fixmap_gdt);
 
 /*
  * Current gdt points %fs at the "master" per-cpu area: after this,
@@ -467,11 +486,8 @@  void load_fixmap_gdt(int cpu)
  */
 void switch_to_new_gdt(int cpu)
 {
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_direct_gdt(cpu);
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
+	/* Load the original GDT */
+	load_direct_gdt(cpu);
 	/* Reload the per-cpu base */
 	load_percpu_segment(cpu);
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d0414f054bdf..7864f4c813a1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -741,7 +741,6 @@  static int svm_hardware_enable(void)
 
 	struct svm_cpu_data *sd;
 	uint64_t efer;
-	struct desc_ptr gdt_descr;
 	struct desc_struct *gdt;
 	int me = raw_smp_processor_id();
 
@@ -763,8 +762,7 @@  static int svm_hardware_enable(void)
 	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
 	sd->next_asid = sd->max_asid + 1;
 
-	native_store_gdt(&gdt_descr);
-	gdt = (struct desc_struct *)gdt_descr.address;
+	gdt = get_current_direct_gdt();
 	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
 	wrmsrl(MSR_EFER, efer | EFER_SVME);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4e691035a32d..c64fa6e0417c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -935,7 +935,6 @@  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
  */
 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
-static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
 
 /*
  * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we
@@ -1997,10 +1996,9 @@  static void reload_tss(void)
 	/*
 	 * VT restores TR but not its size.  Useless.
 	 */
-	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 	struct desc_struct *descs;
 
-	descs = (void *)gdt->address;
+	descs = (void *)get_current_direct_gdt();
 	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
 	load_TR_desc();
 }
@@ -2061,7 +2059,6 @@  static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 
 static unsigned long segment_base(u16 selector)
 {
-	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 	struct desc_struct *d;
 	unsigned long table_base;
 	unsigned long v;
@@ -2069,7 +2066,7 @@  static unsigned long segment_base(u16 selector)
 	if (!(selector & ~3))
 		return 0;
 
-	table_base = gdt->address;
+	table_base = (unsigned long)get_current_direct_gdt();
 
 	if (selector & 4) {           /* from ldt */
 		u16 ldt_selector = kvm_read_ldt();
@@ -2185,7 +2182,7 @@  static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #endif
 	if (vmx->host_state.msr_host_bndcfgs)
 		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
-	load_gdt(this_cpu_ptr(&host_gdt));
+	load_fixmap_gdt(raw_smp_processor_id());
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -2287,7 +2284,7 @@  static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (!already_loaded) {
-		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
+		unsigned long gdt = (unsigned long)get_current_direct_gdt();
 		unsigned long sysenter_esp;
 
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -2297,7 +2294,7 @@  static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * processors.
 		 */
 		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
-		vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
+		vmcs_writel(HOST_GDTR_BASE, gdt);   /* 22.2.4 */
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -3523,8 +3520,6 @@  static int hardware_enable(void)
 		ept_sync_global();
 	}
 
-	native_store_gdt(this_cpu_ptr(&host_gdt));
-
 	return 0;
 }