
[v5,22/23] arm64: KVM: Allow mapping of vectors outside of the RAM region

Message ID 20180301155538.26860-23-marc.zyngier@arm.com (mailing list archive)
State New, archived

Commit Message

Marc Zyngier March 1, 2018, 3:55 p.m. UTC
We're now ready to map our vectors in weird and wonderful locations.
On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR
and gets mapped outside of the normal RAM region, next to the
idmap.

That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
of the rest of the hypervisor code.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 Documentation/arm64/memory.txt   |  3 +-
 arch/arm64/Kconfig               | 16 ++++++++
 arch/arm64/include/asm/kvm_mmu.h | 81 ++++++++++++++++++++++++++++++++++------
 arch/arm64/include/asm/mmu.h     |  5 ++-
 arch/arm64/kernel/Makefile       |  4 +-
 arch/arm64/kvm/va_layout.c       |  3 ++
 6 files changed, 96 insertions(+), 16 deletions(-)

Comments

Andrew Jones March 8, 2018, 5:54 p.m. UTC | #1
On Thu, Mar 01, 2018 at 03:55:37PM +0000, Marc Zyngier wrote:
> We're now ready to map our vectors in weird and wonderful locations.
> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
> if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR
> and gets mapped outside of the normal RAM region, next to the
> idmap.
> 
> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
> of the rest of the hypervisor code.
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  Documentation/arm64/memory.txt   |  3 +-
>  arch/arm64/Kconfig               | 16 ++++++++
>  arch/arm64/include/asm/kvm_mmu.h | 81 ++++++++++++++++++++++++++++++++++------
>  arch/arm64/include/asm/mmu.h     |  5 ++-
>  arch/arm64/kernel/Makefile       |  4 +-
>  arch/arm64/kvm/va_layout.c       |  3 ++
>  6 files changed, 96 insertions(+), 16 deletions(-)
> 
> diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
> index c58cc5dbe667..c5dab30d3389 100644
> --- a/Documentation/arm64/memory.txt
> +++ b/Documentation/arm64/memory.txt
> @@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, the
>  hypervisor maps kernel pages in EL2 at a fixed (and potentially
>  random) offset from the linear mapping. See the kern_hyp_va macro and
>  kvm_update_va_mask function for more details. MMIO devices such as
> -GICv2 gets mapped next to the HYP idmap page.
> +GICv2 gets mapped next to the HYP idmap page, as do vectors when
> +ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
>  
>  When using KVM with the Virtualization Host Extensions, no additional
>  mappings are created, since the host kernel runs directly in EL2.
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 7381eeb7ef8e..e6be4393aaad 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -904,6 +904,22 @@ config HARDEN_BRANCH_PREDICTOR
>  
>  	  If unsure, say Y.
>  
> +config HARDEN_EL2_VECTORS
> +	bool "Harden EL2 vector mapping against system register leak" if EXPERT
> +	default y
> +	help
> +	  Speculation attacks against some high-performance processors can
> +	  be used to leak privileged information such as the vector base
> +	  register, resulting in a potential defeat of the EL2 layout
> +	  randomization.
> +
> +	  This config option will map the vectors to a fixed location,
> +	  independent of the EL2 code mapping, so that revealing VBAR_EL2
> +	  to an attacker does not give away any extra information. This
> +	  only gets enabled on affected CPUs.
> +
> +	  If unsure, say Y.
> +
>  menuconfig ARMV8_DEPRECATED
>  	bool "Emulate deprecated/obsolete ARMv8 instructions"
>  	depends on COMPAT
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 3da9e5aea936..433d13d0c271 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -360,33 +360,90 @@ static inline unsigned int kvm_get_vmid_bits(void)
>  	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>  }
>  
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
> +     defined(CONFIG_HARDEN_EL2_VECTORS))
> +/*
> + * EL2 vectors can be mapped and rerouted in a number of ways,
> + * depending on the kernel configuration and CPU present:
> + *
> + * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
> + *   hardening sequence is placed in one of the vector slots, which is
> + *   executed before jumping to the real vectors.
> + *
> + * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
> + *   hardening, the slot containing the hardening sequence is mapped
> + *   next to the idmap page, and executed before jumping to the real
> + *   vectors.
> + *
> + * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
> + *   is selected, mapped next to the idmap page, and executed before
> + *   jumping to the real vectors.
> + *
> + * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
> + * VHE, as we don't have hypervisor-specific mappings. If the system
> + * is VHE and yet selects this capability, it will be ignored.
> + */
>  #include <asm/mmu.h>
>  
> +extern void *__kvm_bp_vect_base;
> +extern int __kvm_harden_el2_vector_slot;
> +
>  static inline void *kvm_get_hyp_vector(void)
>  {
>  	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> -	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
> +	int slot = -1;
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
> +		slot = data->hyp_vectors_slot;
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
> +	    !has_vhe() && slot == -1)
> +		slot = __kvm_harden_el2_vector_slot;
>  
> -	if (data->fn) {
> -		vect = __bp_harden_hyp_vecs_start +
> -		       data->hyp_vectors_slot * SZ_2K;
> +	if (slot != -1) {
> +		void *vect;
>  
>  		if (!has_vhe())
> -			vect = lm_alias(vect);
> +			vect = __kvm_bp_vect_base;
> +		else
> +			vect = __bp_harden_hyp_vecs_start;
> +		vect += slot * SZ_2K;
> +
> +		return vect;
>  	}
>  
> -	vect = kern_hyp_va(vect);
> -	return vect;
> +	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
>  }

I had trouble reading the above function. How about something like?
(Assuming I got the logic right.)

 static inline void *kvm_get_hyp_vector(void)
 {
        struct bp_hardening_data *data = arm64_get_bp_hardening_data();
        void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
        int slot = -1;

        if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
                vect = __bp_harden_hyp_vecs_start;
                slot = data->hyp_vectors_slot;
        }

        if (!has_vhe() && cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
                vect = __kvm_bp_vect_base;
                if (slot == -1)
                        slot = __kvm_harden_el2_vector_slot;
        }

        if (slot != -1)
                vect += slot * SZ_2K;

        return vect;
 }

>  
> +/*  This is only called on a !VHE system */
>  static inline int kvm_map_vectors(void)
>  {
> -	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> -				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> -				   PAGE_HYP_EXEC);
> -}
> +	phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
> +	unsigned long size = __bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start;

nit: We only use these expressions once, so could probably do away with
the variables. Or the variables could be tucked into the block they're
used in below.

> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {

Moving the create_hyp_mappings() under this cap check looks like a fix
that could be posted separately?

> +		int ret;
> +
> +		ret = create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> +					  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> +					  PAGE_HYP_EXEC);
> +
> +		if (ret)
> +			return ret;
> +
> +		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
> +		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
> +	}
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> +		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);

If I understood the logic in the above function correctly, then we won't
be using this slot when we have the ARM64_HARDEN_BRANCH_PREDICTOR cap.
Should we even bother allocating it when we don't intend to use it?

> +		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
> +		return create_hyp_exec_mappings(vect_pa, size,
> +						&__kvm_bp_vect_base);
> +	}
>  
> +	return 0;
> +}
>  #else
>  static inline void *kvm_get_hyp_vector(void)
>  {
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index 3baf010fe883..0b0cc69031c1 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -51,10 +51,12 @@ struct bp_hardening_data {
>  	bp_hardening_cb_t	fn;
>  };
>  
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
> +     defined(CONFIG_HARDEN_EL2_VECTORS))
>  extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
>  extern atomic_t arm64_el2_vector_last_slot;
>  
> +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
>  DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
>  
>  static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
> @@ -81,6 +83,7 @@ static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
>  
>  static inline void arm64_apply_bp_hardening(void)	{ }
>  #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
> +#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
>  
>  extern void paging_init(void);
>  extern void bootmem_init(void);
> diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
> index b87541360f43..e7fc471c91a6 100644
> --- a/arch/arm64/kernel/Makefile
> +++ b/arch/arm64/kernel/Makefile
> @@ -54,8 +54,8 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
>  arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
>  arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
>  
> -ifeq ($(CONFIG_KVM),y)
> -arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
> +ifneq ($(filter y,$(CONFIG_HARDEN_BRANCH_PREDICTOR) $(CONFIG_HARDEN_EL2_VECTORS)),)
> +arm64-obj-$(CONFIG_KVM)			+= bpi.o
>  endif
>  
>  obj-y					+= $(arm64-obj-y) vdso/ probes/
> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
> index 7ef3d920c8d4..5d17bb50287c 100644
> --- a/arch/arm64/kvm/va_layout.c
> +++ b/arch/arm64/kvm/va_layout.c
> @@ -153,6 +153,9 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
>  	}
>  }
>  
> +void *__kvm_bp_vect_base;
> +int __kvm_harden_el2_vector_slot;
> +
>  void kvm_patch_vector_branch(struct alt_instr *alt,
>  			     __le32 *origptr, __le32 *updptr, int nr_inst)
>  {
> -- 
> 2.14.2
>

Thanks,
drew
James Morse March 9, 2018, 6:59 p.m. UTC | #2
Hi Marc,

On 01/03/18 15:55, Marc Zyngier wrote:
> We're now ready to map our vectors in weird and wonderful locations.
> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
> if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR
> and gets mapped outside of the normal RAM region, next to the
> idmap.
> 
> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
> of the rest of the hypervisor code.

> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 3da9e5aea936..433d13d0c271 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h

[..]

>  
> +/*  This is only called on a !VHE system */
>  static inline int kvm_map_vectors(void)
>  {
> -	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> -				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> -				   PAGE_HYP_EXEC);
> -}
> +	phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);

__pa_symbol()?

A gift from CONFIG_DEBUG_VIRTUAL:

[    3.479878] kvm [1]: 8-bit VMID
[    3.500761] ------------[ cut here ]------------
[    3.505608] virt_to_phys used for non-linear address: 000000006fa7ae39
(__bp_harden_hyp_vecs_start+0x0/0x2000)
[    3.515907] WARNING: CPU: 3 PID: 1 at ../arch/arm64/mm/physaddr.c:15
__virt_to_phys+0x48/0x68
[    3.524614] Modules linked in:
[    3.527782] CPU: 3 PID: 1 Comm: swapper/0 Not tainted
4.16.0-rc4-00024-gf6f4460e41ba-dirty #9396
[    3.536751] Hardware name: ARM Juno development board (r1) (DT)
[    3.542806] pstate: 80400005 (Nzcv daif +PAN -UAO)
[    3.547716] pc : __virt_to_phys+0x48/0x68
[    3.551832] lr : __virt_to_phys+0x48/0x68

[    3.641447] Call trace:
[    3.643975]  __virt_to_phys+0x48/0x68
[    3.647739]  kvm_arch_init+0x2fc/0x734
[    3.651589]  kvm_init+0x28/0x2b0
[    3.654910]  arm_init+0x1c/0x24
[    3.658143]  do_one_initcall+0x38/0x11c
[    3.662083]  kernel_init_freeable+0x1e0/0x27c
[    3.666552]  kernel_init+0x10/0xfc
[    3.670049]  ret_from_fork+0x10/0x18
[    3.673731] ---[ end trace d4ef061e4bf05fc6 ]---
[    3.678870] kvm [1]: vgic-v2@2c04f000
[    3.683424] kvm [1]: vgic interrupt IRQ1
[    3.687675] kvm [1]: virtual timer IRQ5
[    3.692375] kvm [1]: Hyp mode initialized successfully
[    3.718640] Initialise system trusted keyrings



> +	unsigned long size = __bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start;
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> +		int ret;
> +
> +		ret = create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> +					  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> +					  PAGE_HYP_EXEC);
> +
> +		if (ret)
> +			return ret;
> +
> +		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
> +		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
> +	}
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> +		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
> +		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
> +		return create_hyp_exec_mappings(vect_pa, size,
> +						&__kvm_bp_vect_base);
> +	}
>  
> +	return 0;
> +}


Thanks,

James
Marc Zyngier March 12, 2018, 2:23 p.m. UTC | #3
On 09/03/18 18:59, James Morse wrote:
> Hi Marc,
> 
> On 01/03/18 15:55, Marc Zyngier wrote:
>> We're now ready to map our vectors in weird and wonderful locations.
>> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
>> if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR
>> and gets mapped outside of the normal RAM region, next to the
>> idmap.
>>
>> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
>> of the rest of the hypervisor code.
> 
>> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
>> index 3da9e5aea936..433d13d0c271 100644
>> --- a/arch/arm64/include/asm/kvm_mmu.h
>> +++ b/arch/arm64/include/asm/kvm_mmu.h
> 
> [..]
> 
>>  
>> +/*  This is only called on a !VHE system */
>>  static inline int kvm_map_vectors(void)
>>  {
>> -	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
>> -				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
>> -				   PAGE_HYP_EXEC);
>> -}
>> +	phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
> 
> __pa_symbol()?
> 
> A gift from CONFIG_DEBUG_VIRTUAL:
> 
> [    3.479878] kvm [1]: 8-bit VMID
> [    3.500761] ------------[ cut here ]------------
> [    3.505608] virt_to_phys used for non-linear address: 000000006fa7ae39
> (__bp_harden_hyp_vecs_start+0x0/0x2000)
> [    3.515907] WARNING: CPU: 3 PID: 1 at ../arch/arm64/mm/physaddr.c:15
> __virt_to_phys+0x48/0x68
> [    3.524614] Modules linked in:
> [    3.527782] CPU: 3 PID: 1 Comm: swapper/0 Not tainted
> 4.16.0-rc4-00024-gf6f4460e41ba-dirty #9396
> [    3.536751] Hardware name: ARM Juno development board (r1) (DT)
> [    3.542806] pstate: 80400005 (Nzcv daif +PAN -UAO)
> [    3.547716] pc : __virt_to_phys+0x48/0x68
> [    3.551832] lr : __virt_to_phys+0x48/0x68
> 
> [    3.641447] Call trace:
> [    3.643975]  __virt_to_phys+0x48/0x68
> [    3.647739]  kvm_arch_init+0x2fc/0x734
> [    3.651589]  kvm_init+0x28/0x2b0
> [    3.654910]  arm_init+0x1c/0x24
> [    3.658143]  do_one_initcall+0x38/0x11c
> [    3.662083]  kernel_init_freeable+0x1e0/0x27c
> [    3.666552]  kernel_init+0x10/0xfc
> [    3.670049]  ret_from_fork+0x10/0x18
> [    3.673731] ---[ end trace d4ef061e4bf05fc6 ]---
> [    3.678870] kvm [1]: vgic-v2@2c04f000
> [    3.683424] kvm [1]: vgic interrupt IRQ1
> [    3.687675] kvm [1]: virtual timer IRQ5
> [    3.692375] kvm [1]: Hyp mode initialized successfully
> [    3.718640] Initialise system trusted keyrings
Nice catch. Fixed locally.

Thanks,

	M.
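
For reference, the fix agreed above amounts to swapping the accessor; a minimal
sketch only, the exact hunk in the next revision may differ:

-	phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
+	phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);

__pa_symbol() is meant for kernel-image symbols such as
__bp_harden_hyp_vecs_start, while virt_to_phys() expects linear-map addresses
and, with CONFIG_DEBUG_VIRTUAL=y, emits the warning shown in the splat above.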
Marc Zyngier March 13, 2018, 10:30 a.m. UTC | #4
Hi Drew,

On 08/03/18 17:54, Andrew Jones wrote:
> On Thu, Mar 01, 2018 at 03:55:37PM +0000, Marc Zyngier wrote:
>> We're now ready to map our vectors in weird and wonderful locations.
>> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
>> if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR
>> and gets mapped outside of the normal RAM region, next to the
>> idmap.
>>
>> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
>> of the rest of the hypervisor code.
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>>  Documentation/arm64/memory.txt   |  3 +-
>>  arch/arm64/Kconfig               | 16 ++++++++
>>  arch/arm64/include/asm/kvm_mmu.h | 81 ++++++++++++++++++++++++++++++++++------
>>  arch/arm64/include/asm/mmu.h     |  5 ++-
>>  arch/arm64/kernel/Makefile       |  4 +-
>>  arch/arm64/kvm/va_layout.c       |  3 ++
>>  6 files changed, 96 insertions(+), 16 deletions(-)
>>
>> diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
>> index c58cc5dbe667..c5dab30d3389 100644
>> --- a/Documentation/arm64/memory.txt
>> +++ b/Documentation/arm64/memory.txt
>> @@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, the
>>  hypervisor maps kernel pages in EL2 at a fixed (and potentially
>>  random) offset from the linear mapping. See the kern_hyp_va macro and
>>  kvm_update_va_mask function for more details. MMIO devices such as
>> -GICv2 gets mapped next to the HYP idmap page.
>> +GICv2 gets mapped next to the HYP idmap page, as do vectors when
>> +ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
>>  
>>  When using KVM with the Virtualization Host Extensions, no additional
>>  mappings are created, since the host kernel runs directly in EL2.
>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>> index 7381eeb7ef8e..e6be4393aaad 100644
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
>> @@ -904,6 +904,22 @@ config HARDEN_BRANCH_PREDICTOR
>>  
>>  	  If unsure, say Y.
>>  
>> +config HARDEN_EL2_VECTORS
>> +	bool "Harden EL2 vector mapping against system register leak" if EXPERT
>> +	default y
>> +	help
>> +	  Speculation attacks against some high-performance processors can
>> +	  be used to leak privileged information such as the vector base
>> +	  register, resulting in a potential defeat of the EL2 layout
>> +	  randomization.
>> +
>> +	  This config option will map the vectors to a fixed location,
>> +	  independent of the EL2 code mapping, so that revealing VBAR_EL2
>> +	  to an attacker does not give away any extra information. This
>> +	  only gets enabled on affected CPUs.
>> +
>> +	  If unsure, say Y.
>> +
>>  menuconfig ARMV8_DEPRECATED
>>  	bool "Emulate deprecated/obsolete ARMv8 instructions"
>>  	depends on COMPAT
>> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
>> index 3da9e5aea936..433d13d0c271 100644
>> --- a/arch/arm64/include/asm/kvm_mmu.h
>> +++ b/arch/arm64/include/asm/kvm_mmu.h
>> @@ -360,33 +360,90 @@ static inline unsigned int kvm_get_vmid_bits(void)
>>  	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>>  }
>>  
>> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
>> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
>> +     defined(CONFIG_HARDEN_EL2_VECTORS))
>> +/*
>> + * EL2 vectors can be mapped and rerouted in a number of ways,
>> + * depending on the kernel configuration and CPU present:
>> + *
>> + * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
>> + *   hardening sequence is placed in one of the vector slots, which is
>> + *   executed before jumping to the real vectors.
>> + *
>> + * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
>> + *   hardening, the slot containing the hardening sequence is mapped
>> + *   next to the idmap page, and executed before jumping to the real
>> + *   vectors.
>> + *
>> + * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
>> + *   is selected, mapped next to the idmap page, and executed before
>> + *   jumping to the real vectors.
>> + *
>> + * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
>> + * VHE, as we don't have hypervisor-specific mappings. If the system
>> + * is VHE and yet selects this capability, it will be ignored.
>> + */
>>  #include <asm/mmu.h>
>>  
>> +extern void *__kvm_bp_vect_base;
>> +extern int __kvm_harden_el2_vector_slot;
>> +
>>  static inline void *kvm_get_hyp_vector(void)
>>  {
>>  	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
>> -	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
>> +	int slot = -1;
>> +
>> +	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
>> +		slot = data->hyp_vectors_slot;
>> +
>> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
>> +	    !has_vhe() && slot == -1)
>> +		slot = __kvm_harden_el2_vector_slot;
>>  
>> -	if (data->fn) {
>> -		vect = __bp_harden_hyp_vecs_start +
>> -		       data->hyp_vectors_slot * SZ_2K;
>> +	if (slot != -1) {
>> +		void *vect;
>>  
>>  		if (!has_vhe())
>> -			vect = lm_alias(vect);
>> +			vect = __kvm_bp_vect_base;
>> +		else
>> +			vect = __bp_harden_hyp_vecs_start;
>> +		vect += slot * SZ_2K;
>> +
>> +		return vect;
>>  	}
>>  
>> -	vect = kern_hyp_va(vect);
>> -	return vect;
>> +	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
>>  }
> 
> I had trouble reading the above function. How about something like?
> (Assuming I got the logic right.)
> 
>  static inline void *kvm_get_hyp_vector(void)
>  {
>         struct bp_hardening_data *data = arm64_get_bp_hardening_data();
>         void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
>         int slot = -1;
> 
>         if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
>                 vect = __bp_harden_hyp_vecs_start;
>                 slot = data->hyp_vectors_slot;
>         }
> 
>         if (!has_vhe() && cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
>                 vect = __kvm_bp_vect_base;
>                 if (slot == -1)
>                         slot = __kvm_harden_el2_vector_slot;
>         }
> 
>         if (slot != -1)
>                 vect += slot * SZ_2K;
> 
>         return vect;
>  }

Yes, this looks much more palatable than my initial approach. I'll use that.

> 
>>  
>> +/*  This is only called on a !VHE system */
>>  static inline int kvm_map_vectors(void)
>>  {
>> -	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
>> -				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
>> -				   PAGE_HYP_EXEC);
>> -}
>> +	phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
>> +	unsigned long size = __bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start;
> 
> nit: We only use these expressions once, so could probably do away with
> the variables. Or the variables could be tucked into the block they're
> used in below.
> 
>> +
>> +	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> 
> Moving the create_hyp_mappings() under this cap check looks like a fix
> that could be posted separately?

We could. Not sure that's a big deal though.

> 
>> +		int ret;
>> +
>> +		ret = create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
>> +					  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
>> +					  PAGE_HYP_EXEC);
>> +
>> +		if (ret)
>> +			return ret;
>> +
>> +		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
>> +		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
>> +	}
>> +
>> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
>> +		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
> 
> If I understood the logic in the above function correctly, then we won't
> be using this slot when we have the ARM64_HARDEN_BRANCH_PREDICTOR cap.
> Should we even bother allocating it when we don't intend to use it?

You could be on a system with a mix of affected and non-affected CPUs,
and the capability only tells you that at least one of the CPUs is
affected. For the non-affected CPUs, we won't have a slot (since there
is no workaround to provide), unless we actually allocated one.

Thanks,

	M.
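
To illustrate the heterogeneous case Marc describes, here is a rough sketch
(not the final code) of the slot selection in kvm_get_hyp_vector() above: a
CPU without the BP-hardening workaround has no data->fn and therefore no
hyp_vectors_slot, so it falls back to the empty slot allocated once in
kvm_map_vectors():

	int slot = -1;

	/* Affected CPU: use its per-CPU hardening sequence slot. */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
		slot = data->hyp_vectors_slot;

	/* Unaffected CPU on the same system: fall back to the empty slot. */
	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
	    !has_vhe() && slot == -1)
		slot = __kvm_harden_el2_vector_slot;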
Andrew Jones March 13, 2018, 11:14 a.m. UTC | #5
On Tue, Mar 13, 2018 at 10:30:12AM +0000, Marc Zyngier wrote:
> >> +
> >> +    if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> > 
> > Moving the create_hyp_mappings() under this cap check looks like a fix
> > that could be posted separately?
> 
> We could. Not sure that's a big deal though.

Nah, it isn't worth the fuss.

> >> +
> >> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> >> +		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
> > 
> > If I understood the logic in the above function correctly, then we won't
> > be using this slot when we have the ARM64_HARDEN_BRANCH_PREDICTOR cap.
> > Should we even bother allocating it when we don't intend to use it?
> 
> You could be on a system with a mix of affected and non-affected CPUs,
> and the capability only tells you that at least one of the CPUs is
> affected. For the non-affected CPUs, we won't have a slot (since there
> is no workaround to provide), unless we actually allocated one.
>

Ah yes, the heterogeneous considerations that I always forget to consider.

Thanks,
drew
James Morse March 14, 2018, 11:40 a.m. UTC | #6
Hi Marc,

On 01/03/18 15:55, Marc Zyngier wrote:
> We're now ready to map our vectors in weird and wonderful locations.
> On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
> if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR
> and gets mapped outside of the normal RAM region, next to the
> idmap.
> 
> That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
> of the rest of the hypervisor code.

> diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
> index c58cc5dbe667..c5dab30d3389 100644
> --- a/Documentation/arm64/memory.txt
> +++ b/Documentation/arm64/memory.txt
> @@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, the
>  hypervisor maps kernel pages in EL2 at a fixed (and potentially
>  random) offset from the linear mapping. See the kern_hyp_va macro and
>  kvm_update_va_mask function for more details. MMIO devices such as
> -GICv2 gets mapped next to the HYP idmap page.
> +GICv2 gets mapped next to the HYP idmap page, as do vectors when
> +ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
>  
>  When using KVM with the Virtualization Host Extensions, no additional
>  mappings are created, since the host kernel runs directly in EL2.

> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 3da9e5aea936..433d13d0c271 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -360,33 +360,90 @@ static inline unsigned int kvm_get_vmid_bits(void)
>  	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>  }
>  
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
> +     defined(CONFIG_HARDEN_EL2_VECTORS))
> +/*
> + * EL2 vectors can be mapped and rerouted in a number of ways,
> + * depending on the kernel configuration and CPU present:
> + *
> + * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
> + *   hardening sequence is placed in one of the vector slots, which is
> + *   executed before jumping to the real vectors.
> + *
> + * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
> + *   hardening, the slot containing the hardening sequence is mapped
> + *   next to the idmap page, and executed before jumping to the real
> + *   vectors.
> + *
> + * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
> + *   is selected, mapped next to the idmap page, and executed before
> + *   jumping to the real vectors.

> + * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
> + * VHE, as we don't have hypervisor-specific mappings. If the system
> + * is VHE and yet selects this capability, it will be ignored.

Silently? This isn't a problem as the CPUs you enable this for don't have VHE.
Is it worth a warning? If we did ever need to support it, we can pull the same
trick the arch code uses, using a fixmap entry for the vectors.


> + */
>  #include <asm/mmu.h>
>  
> +extern void *__kvm_bp_vect_base;
> +extern int __kvm_harden_el2_vector_slot;
> +
>  static inline void *kvm_get_hyp_vector(void)
>  {
>  	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> -	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
> +	int slot = -1;
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
> +		slot = data->hyp_vectors_slot;
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
> +	    !has_vhe() && slot == -1)
> +		slot = __kvm_harden_el2_vector_slot;
>  
> -	if (data->fn) {
> -		vect = __bp_harden_hyp_vecs_start +
> -		       data->hyp_vectors_slot * SZ_2K;
> +	if (slot != -1) {
> +		void *vect;
>  
>  		if (!has_vhe())
> -			vect = lm_alias(vect);
> +			vect = __kvm_bp_vect_base;
> +		else
> +			vect = __bp_harden_hyp_vecs_start;
> +		vect += slot * SZ_2K;
> +
> +		return vect;
>  	}
>  
> -	vect = kern_hyp_va(vect);
> -	return vect;
> +	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
>  }
>  
> +/*  This is only called on a !VHE system */
>  static inline int kvm_map_vectors(void)
>  {
> -	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> -				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> -				   PAGE_HYP_EXEC);
> -}
> +	phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
> +	unsigned long size = __bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start;
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> +		int ret;
> +
> +		ret = create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> +					  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> +					  PAGE_HYP_EXEC);

We don't have to do this for the regular vectors, as they are part of the
__hyp_text. How come these aren't?

The existing Makefile depends on KVM to build these. How come it isn't under
arch/arm64/kvm?


> +		if (ret)
> +			return ret;
> +
> +		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
> +		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
> +	}
> +
> +	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> +		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
> +		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
> +		return create_hyp_exec_mappings(vect_pa, size,
> +						&__kvm_bp_vect_base);
> +	}
>  
> +	return 0;
> +}
>  #else
>  static inline void *kvm_get_hyp_vector(void)
>  {

> diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
> index b87541360f43..e7fc471c91a6 100644
> --- a/arch/arm64/kernel/Makefile
> +++ b/arch/arm64/kernel/Makefile
> @@ -54,8 +54,8 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
>  arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
>  arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
>  
> -ifeq ($(CONFIG_KVM),y)
> -arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
> +ifneq ($(filter y,$(CONFIG_HARDEN_BRANCH_PREDICTOR) $(CONFIG_HARDEN_EL2_VECTORS)),)
> +arm64-obj-$(CONFIG_KVM)			+= bpi.o
>  endif

Isn't Kconfig 'select'ing a hidden-option the usual way this is done?


Thanks,

James

Patch

diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index c58cc5dbe667..c5dab30d3389 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -90,7 +90,8 @@  When using KVM without the Virtualization Host Extensions, the
 hypervisor maps kernel pages in EL2 at a fixed (and potentially
 random) offset from the linear mapping. See the kern_hyp_va macro and
 kvm_update_va_mask function for more details. MMIO devices such as
-GICv2 gets mapped next to the HYP idmap page.
+GICv2 gets mapped next to the HYP idmap page, as do vectors when
+ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
 
 When using KVM with the Virtualization Host Extensions, no additional
 mappings are created, since the host kernel runs directly in EL2.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7381eeb7ef8e..e6be4393aaad 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -904,6 +904,22 @@  config HARDEN_BRANCH_PREDICTOR
 
 	  If unsure, say Y.
 
+config HARDEN_EL2_VECTORS
+	bool "Harden EL2 vector mapping against system register leak" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to leak privileged information such as the vector base
+	  register, resulting in a potential defeat of the EL2 layout
+	  randomization.
+
+	  This config option will map the vectors to a fixed location,
+	  independent of the EL2 code mapping, so that revealing VBAR_EL2
+	  to an attacker does not give away any extra information. This
+	  only gets enabled on affected CPUs.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 3da9e5aea936..433d13d0c271 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -360,33 +360,90 @@  static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
+     defined(CONFIG_HARDEN_EL2_VECTORS))
+/*
+ * EL2 vectors can be mapped and rerouted in a number of ways,
+ * depending on the kernel configuration and CPU present:
+ *
+ * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
+ *   hardening sequence is placed in one of the vector slots, which is
+ *   executed before jumping to the real vectors.
+ *
+ * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS and BP
+ *   hardening, the slot containing the hardening sequence is mapped
+ *   next to the idmap page, and executed before jumping to the real
+ *   vectors.
+ *
+ * - If the CPU only has ARM64_HARDEN_EL2_VECTORS, then an empty slot
+ *   is selected, mapped next to the idmap page, and executed before
+ *   jumping to the real vectors.
+ *
+ * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
+ * VHE, as we don't have hypervisor-specific mappings. If the system
+ * is VHE and yet selects this capability, it will be ignored.
+ */
 #include <asm/mmu.h>
 
+extern void *__kvm_bp_vect_base;
+extern int __kvm_harden_el2_vector_slot;
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
-	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+	int slot = -1;
+
+	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn)
+		slot = data->hyp_vectors_slot;
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) &&
+	    !has_vhe() && slot == -1)
+		slot = __kvm_harden_el2_vector_slot;
 
-	if (data->fn) {
-		vect = __bp_harden_hyp_vecs_start +
-		       data->hyp_vectors_slot * SZ_2K;
+	if (slot != -1) {
+		void *vect;
 
 		if (!has_vhe())
-			vect = lm_alias(vect);
+			vect = __kvm_bp_vect_base;
+		else
+			vect = __bp_harden_hyp_vecs_start;
+		vect += slot * SZ_2K;
+
+		return vect;
 	}
 
-	vect = kern_hyp_va(vect);
-	return vect;
+	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 }
 
+/*  This is only called on a !VHE system */
 static inline int kvm_map_vectors(void)
 {
-	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
-				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
-				   PAGE_HYP_EXEC);
-}
+	phys_addr_t vect_pa = virt_to_phys(__bp_harden_hyp_vecs_start);
+	unsigned long size = __bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start;
+
+	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+		int ret;
+
+		ret = create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+					  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+					  PAGE_HYP_EXEC);
+
+		if (ret)
+			return ret;
+
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
 
+	return 0;
+}
 #else
 static inline void *kvm_get_hyp_vector(void)
 {
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3baf010fe883..0b0cc69031c1 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -51,10 +51,12 @@  struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
+     defined(CONFIG_HARDEN_EL2_VECTORS))
 extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
 extern atomic_t arm64_el2_vector_last_slot;
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -81,6 +83,7 @@  static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
 
 static inline void arm64_apply_bp_hardening(void)	{ }
 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
 
 extern void paging_init(void);
 extern void bootmem_init(void);
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index b87541360f43..e7fc471c91a6 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -54,8 +54,8 @@  arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
 
-ifeq ($(CONFIG_KVM),y)
-arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
+ifneq ($(filter y,$(CONFIG_HARDEN_BRANCH_PREDICTOR) $(CONFIG_HARDEN_EL2_VECTORS)),)
+arm64-obj-$(CONFIG_KVM)			+= bpi.o
 endif
 
 obj-y					+= $(arm64-obj-y) vdso/ probes/
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 7ef3d920c8d4..5d17bb50287c 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -153,6 +153,9 @@  void __init kvm_update_va_mask(struct alt_instr *alt,
 	}
 }
 
+void *__kvm_bp_vect_base;
+int __kvm_harden_el2_vector_slot;
+
 void kvm_patch_vector_branch(struct alt_instr *alt,
 			     __le32 *origptr, __le32 *updptr, int nr_inst)
 {