[v5,03/10] arm64: add sysfs vulnerability show for meltdown
diff mbox series

Message ID 20190227010544.597579-4-jeremy.linton@arm.com
State New, archived
Headers show
Series
  • arm64: add system vulnerability sysfs entries
Related show

Commit Message

Jeremy Linton Feb. 27, 2019, 1:05 a.m. UTC
Display the mitigation status if active; otherwise
report the CPU as safe unless it both lacks CSV3
and is absent from our whitelist.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
---
 arch/arm64/kernel/cpufeature.c | 47 ++++++++++++++++++++++++++--------
 1 file changed, 37 insertions(+), 10 deletions(-)

Comments

Suzuki K Poulose Feb. 28, 2019, 6:33 p.m. UTC | #1
On 27/02/2019 01:05, Jeremy Linton wrote:
> Display the mitigation status if active, otherwise
> assume the cpu is safe unless it doesn't have CSV3
> and isn't in our whitelist.
> 
> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
André Przywara March 1, 2019, 7:11 a.m. UTC | #2
Hi,

On 2/26/19 7:05 PM, Jeremy Linton wrote:
> Display the mitigation status if active, otherwise
> assume the cpu is safe unless it doesn't have CSV3
> and isn't in our whitelist.
> 
> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
> ---
>   arch/arm64/kernel/cpufeature.c | 47 ++++++++++++++++++++++++++--------
>   1 file changed, 37 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index f6d84e2c92fe..d31bd770acba 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -944,7 +944,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
>   	return has_cpuid_feature(entry, scope);
>   }
>   
> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> +static bool __meltdown_safe = true;
>   static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
>   
>   static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> @@ -963,6 +963,16 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>   		{ /* sentinel */ }
>   	};
>   	char const *str = "command line option";
> +	bool meltdown_safe;
> +
> +	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
> +
> +	/* Defer to CPU feature registers */
> +	if (has_cpuid_feature(entry, scope))
> +		meltdown_safe = true;
> +
> +	if (!meltdown_safe)
> +		__meltdown_safe = false;
>   
>   	/*
>   	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
> @@ -974,6 +984,11 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>   		__kpti_forced = -1;
>   	}
>   
> +	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
> +		pr_info_once("kernel page table isolation disabled by CONFIG\n");
> +		return false;
> +	}
> +
>   	/* Forced? */
>   	if (__kpti_forced) {
>   		pr_info_once("kernel page table isolation forced %s by %s\n",
> @@ -985,14 +1000,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>   	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>   		return kaslr_offset() > 0;
>   
> -	/* Don't force KPTI for CPUs that are not vulnerable */
> -	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
> -		return false;
> -
> -	/* Defer to CPU feature registers */
> -	return !has_cpuid_feature(entry, scope);
> +	return !meltdown_safe;
>   }
>   
> +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>   static void
>   kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>   {
> @@ -1022,6 +1033,13 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>   
>   	return;
>   }
> +#else
> +static void
> +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
> +{
> +}
> +#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
> +
>   
>   static int __init parse_kpti(char *str)
>   {
> @@ -1035,7 +1053,6 @@ static int __init parse_kpti(char *str)
>   	return 0;
>   }
>   early_param("kpti", parse_kpti);
> -#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
>   
>   #ifdef CONFIG_ARM64_HW_AFDBM
>   static inline void __cpu_enable_hw_dbm(void)
> @@ -1286,7 +1303,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>   		.field_pos = ID_AA64PFR0_EL0_SHIFT,
>   		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
>   	},
> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>   	{
>   		.desc = "Kernel page table isolation (KPTI)",
>   		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
> @@ -1302,7 +1318,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>   		.matches = unmap_kernel_at_el0,
>   		.cpu_enable = kpti_install_ng_mappings,
>   	},
> -#endif
>   	{
>   		/* FP/SIMD is not implemented */
>   		.capability = ARM64_HAS_NO_FPSIMD,
> @@ -2063,3 +2078,15 @@ static int __init enable_mrs_emulation(void)
>   }
>   
>   core_initcall(enable_mrs_emulation);
> +
> +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
> +		char *buf)
> +{
> +	if (arm64_kernel_unmapped_at_el0())
> +		return sprintf(buf, "Mitigation: KPTI\n");
> +
> +	if (__meltdown_safe)
> +		return sprintf(buf, "Not affected\n");

Shall those two checks be swapped? So it doesn't report about a KPTI 
mitigation if the CPU is safe, but we enable KPTI because of KASLR 
having enabled it? Or is that a different knob?

Cheers,
Andre.

> +
> +	return sprintf(buf, "Vulnerable\n");
> +}
>
Jeremy Linton March 1, 2019, 4:12 p.m. UTC | #3
Hi,

On 3/1/19 1:11 AM, Andre Przywara wrote:
> Hi,
> 
> On 2/26/19 7:05 PM, Jeremy Linton wrote:
>> Display the mitigation status if active, otherwise
>> assume the cpu is safe unless it doesn't have CSV3
>> and isn't in our whitelist.
>>
>> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
>> ---
>>   arch/arm64/kernel/cpufeature.c | 47 ++++++++++++++++++++++++++--------
>>   1 file changed, 37 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/arm64/kernel/cpufeature.c 
>> b/arch/arm64/kernel/cpufeature.c
>> index f6d84e2c92fe..d31bd770acba 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -944,7 +944,7 @@ has_useable_cnp(const struct 
>> arm64_cpu_capabilities *entry, int scope)
>>       return has_cpuid_feature(entry, scope);
>>   }
>> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>> +static bool __meltdown_safe = true;
>>   static int __kpti_forced; /* 0: not forced, >0: forced on, <0: 
>> forced off */
>>   static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities 
>> *entry,
>> @@ -963,6 +963,16 @@ static bool unmap_kernel_at_el0(const struct 
>> arm64_cpu_capabilities *entry,
>>           { /* sentinel */ }
>>       };
>>       char const *str = "command line option";
>> +    bool meltdown_safe;
>> +
>> +    meltdown_safe = is_midr_in_range_list(read_cpuid_id(), 
>> kpti_safe_list);
>> +
>> +    /* Defer to CPU feature registers */
>> +    if (has_cpuid_feature(entry, scope))
>> +        meltdown_safe = true;
>> +
>> +    if (!meltdown_safe)
>> +        __meltdown_safe = false;
>>       /*
>>        * For reasons that aren't entirely clear, enabling KPTI on Cavium
>> @@ -974,6 +984,11 @@ static bool unmap_kernel_at_el0(const struct 
>> arm64_cpu_capabilities *entry,
>>           __kpti_forced = -1;
>>       }
>> +    if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
>> +        pr_info_once("kernel page table isolation disabled by 
>> CONFIG\n");
>> +        return false;
>> +    }
>> +
>>       /* Forced? */
>>       if (__kpti_forced) {
>>           pr_info_once("kernel page table isolation forced %s by %s\n",
>> @@ -985,14 +1000,10 @@ static bool unmap_kernel_at_el0(const struct 
>> arm64_cpu_capabilities *entry,
>>       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>>           return kaslr_offset() > 0;
>> -    /* Don't force KPTI for CPUs that are not vulnerable */
>> -    if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
>> -        return false;
>> -
>> -    /* Defer to CPU feature registers */
>> -    return !has_cpuid_feature(entry, scope);
>> +    return !meltdown_safe;
>>   }
>> +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>   static void
>>   kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>>   {
>> @@ -1022,6 +1033,13 @@ kpti_install_ng_mappings(const struct 
>> arm64_cpu_capabilities *__unused)
>>       return;
>>   }
>> +#else
>> +static void
>> +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>> +{
>> +}
>> +#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
>> +
>>   static int __init parse_kpti(char *str)
>>   {
>> @@ -1035,7 +1053,6 @@ static int __init parse_kpti(char *str)
>>       return 0;
>>   }
>>   early_param("kpti", parse_kpti);
>> -#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
>>   #ifdef CONFIG_ARM64_HW_AFDBM
>>   static inline void __cpu_enable_hw_dbm(void)
>> @@ -1286,7 +1303,6 @@ static const struct arm64_cpu_capabilities 
>> arm64_features[] = {
>>           .field_pos = ID_AA64PFR0_EL0_SHIFT,
>>           .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
>>       },
>> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>       {
>>           .desc = "Kernel page table isolation (KPTI)",
>>           .capability = ARM64_UNMAP_KERNEL_AT_EL0,
>> @@ -1302,7 +1318,6 @@ static const struct arm64_cpu_capabilities 
>> arm64_features[] = {
>>           .matches = unmap_kernel_at_el0,
>>           .cpu_enable = kpti_install_ng_mappings,
>>       },
>> -#endif
>>       {
>>           /* FP/SIMD is not implemented */
>>           .capability = ARM64_HAS_NO_FPSIMD,
>> @@ -2063,3 +2078,15 @@ static int __init enable_mrs_emulation(void)
>>   }
>>   core_initcall(enable_mrs_emulation);
>> +
>> +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute 
>> *attr,
>> +        char *buf)
>> +{
>> +    if (arm64_kernel_unmapped_at_el0())
>> +        return sprintf(buf, "Mitigation: KPTI\n");
>> +
>> +    if (__meltdown_safe)
>> +        return sprintf(buf, "Not affected\n");
> 
> Shall those two checks be swapped? So it doesn't report about a KPTI 
> mitigation if the CPU is safe, but we enable KPTI because of KASLR 
> having enabled it? Or is that a different knob?

Hmmm, I think having it this way reflects the fact that the machine is 
mitigated independent of whether it needed it. The force on case is 
similar. The machine may not have needed the mitigation but it was 
forced on.


> 
> Cheers,
> Andre.
>  
>> +
>> +    return sprintf(buf, "Vulnerable\n");
>> +}
>>
Catalin Marinas March 1, 2019, 4:20 p.m. UTC | #4
On Fri, Mar 01, 2019 at 10:12:09AM -0600, Jeremy Linton wrote:
> On 3/1/19 1:11 AM, Andre Przywara wrote:
> > On 2/26/19 7:05 PM, Jeremy Linton wrote:
> > > Display the mitigation status if active, otherwise
> > > assume the cpu is safe unless it doesn't have CSV3
> > > and isn't in our whitelist.
> > > 
> > > Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
> > > ---
> > >   arch/arm64/kernel/cpufeature.c | 47 ++++++++++++++++++++++++++--------
> > >   1 file changed, 37 insertions(+), 10 deletions(-)
> > > 
> > > diff --git a/arch/arm64/kernel/cpufeature.c
> > > b/arch/arm64/kernel/cpufeature.c
> > > index f6d84e2c92fe..d31bd770acba 100644
> > > --- a/arch/arm64/kernel/cpufeature.c
> > > +++ b/arch/arm64/kernel/cpufeature.c
> > > @@ -944,7 +944,7 @@ has_useable_cnp(const struct
> > > arm64_cpu_capabilities *entry, int scope)
> > >       return has_cpuid_feature(entry, scope);
> > >   }
> > > -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> > > +static bool __meltdown_safe = true;
> > >   static int __kpti_forced; /* 0: not forced, >0: forced on, <0:
> > > forced off */
> > >   static bool unmap_kernel_at_el0(const struct
> > > arm64_cpu_capabilities *entry,
> > > @@ -963,6 +963,16 @@ static bool unmap_kernel_at_el0(const struct
> > > arm64_cpu_capabilities *entry,
> > >           { /* sentinel */ }
> > >       };
> > >       char const *str = "command line option";
> > > +    bool meltdown_safe;
> > > +
> > > +    meltdown_safe = is_midr_in_range_list(read_cpuid_id(),
> > > kpti_safe_list);
> > > +
> > > +    /* Defer to CPU feature registers */
> > > +    if (has_cpuid_feature(entry, scope))
> > > +        meltdown_safe = true;
> > > +
> > > +    if (!meltdown_safe)
> > > +        __meltdown_safe = false;
> > >       /*
> > >        * For reasons that aren't entirely clear, enabling KPTI on Cavium
> > > @@ -974,6 +984,11 @@ static bool unmap_kernel_at_el0(const struct
> > > arm64_cpu_capabilities *entry,
> > >           __kpti_forced = -1;
> > >       }
> > > +    if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
> > > +        pr_info_once("kernel page table isolation disabled by
> > > CONFIG\n");
> > > +        return false;
> > > +    }
> > > +
> > >       /* Forced? */
> > >       if (__kpti_forced) {
> > >           pr_info_once("kernel page table isolation forced %s by %s\n",
> > > @@ -985,14 +1000,10 @@ static bool unmap_kernel_at_el0(const struct
> > > arm64_cpu_capabilities *entry,
> > >       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
> > >           return kaslr_offset() > 0;
> > > -    /* Don't force KPTI for CPUs that are not vulnerable */
> > > -    if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
> > > -        return false;
> > > -
> > > -    /* Defer to CPU feature registers */
> > > -    return !has_cpuid_feature(entry, scope);
> > > +    return !meltdown_safe;
> > >   }
> > > +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> > >   static void
> > >   kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
> > >   {
> > > @@ -1022,6 +1033,13 @@ kpti_install_ng_mappings(const struct
> > > arm64_cpu_capabilities *__unused)
> > >       return;
> > >   }
> > > +#else
> > > +static void
> > > +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
> > > +{
> > > +}
> > > +#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
> > > +
> > >   static int __init parse_kpti(char *str)
> > >   {
> > > @@ -1035,7 +1053,6 @@ static int __init parse_kpti(char *str)
> > >       return 0;
> > >   }
> > >   early_param("kpti", parse_kpti);
> > > -#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
> > >   #ifdef CONFIG_ARM64_HW_AFDBM
> > >   static inline void __cpu_enable_hw_dbm(void)
> > > @@ -1286,7 +1303,6 @@ static const struct arm64_cpu_capabilities
> > > arm64_features[] = {
> > >           .field_pos = ID_AA64PFR0_EL0_SHIFT,
> > >           .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
> > >       },
> > > -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> > >       {
> > >           .desc = "Kernel page table isolation (KPTI)",
> > >           .capability = ARM64_UNMAP_KERNEL_AT_EL0,
> > > @@ -1302,7 +1318,6 @@ static const struct arm64_cpu_capabilities
> > > arm64_features[] = {
> > >           .matches = unmap_kernel_at_el0,
> > >           .cpu_enable = kpti_install_ng_mappings,
> > >       },
> > > -#endif
> > >       {
> > >           /* FP/SIMD is not implemented */
> > >           .capability = ARM64_HAS_NO_FPSIMD,
> > > @@ -2063,3 +2078,15 @@ static int __init enable_mrs_emulation(void)
> > >   }
> > >   core_initcall(enable_mrs_emulation);
> > > +
> > > +ssize_t cpu_show_meltdown(struct device *dev, struct
> > > device_attribute *attr,
> > > +        char *buf)
> > > +{
> > > +    if (arm64_kernel_unmapped_at_el0())
> > > +        return sprintf(buf, "Mitigation: KPTI\n");
> > > +
> > > +    if (__meltdown_safe)
> > > +        return sprintf(buf, "Not affected\n");
> > 
> > Shall those two checks be swapped? So it doesn't report about a KPTI
> > mitigation if the CPU is safe, but we enable KPTI because of KASLR
> > having enabled it? Or is that a different knob?
> 
> Hmmm, I think having it this way reflects the fact that the machine is
> mitigated independent of whether it needed it. The force on case is similar.
> The machine may not have needed the mitigation but it was forced on.

So is this patchset about showing vulnerabilities _and_ mitigations or
just one of them?
Jeremy Linton March 1, 2019, 4:53 p.m. UTC | #5
Hi,

On 3/1/19 10:20 AM, Catalin Marinas wrote:
> On Fri, Mar 01, 2019 at 10:12:09AM -0600, Jeremy Linton wrote:
>> On 3/1/19 1:11 AM, Andre Przywara wrote:
>>> On 2/26/19 7:05 PM, Jeremy Linton wrote:
>>>> Display the mitigation status if active, otherwise
>>>> assume the cpu is safe unless it doesn't have CSV3
>>>> and isn't in our whitelist.
>>>>
>>>> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
>>>> ---
>>>>    arch/arm64/kernel/cpufeature.c | 47 ++++++++++++++++++++++++++--------
>>>>    1 file changed, 37 insertions(+), 10 deletions(-)
>>>>
>>>> diff --git a/arch/arm64/kernel/cpufeature.c
>>>> b/arch/arm64/kernel/cpufeature.c
>>>> index f6d84e2c92fe..d31bd770acba 100644
>>>> --- a/arch/arm64/kernel/cpufeature.c
>>>> +++ b/arch/arm64/kernel/cpufeature.c
>>>> @@ -944,7 +944,7 @@ has_useable_cnp(const struct
>>>> arm64_cpu_capabilities *entry, int scope)
>>>>        return has_cpuid_feature(entry, scope);
>>>>    }
>>>> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>>> +static bool __meltdown_safe = true;
>>>>    static int __kpti_forced; /* 0: not forced, >0: forced on, <0:
>>>> forced off */
>>>>    static bool unmap_kernel_at_el0(const struct
>>>> arm64_cpu_capabilities *entry,
>>>> @@ -963,6 +963,16 @@ static bool unmap_kernel_at_el0(const struct
>>>> arm64_cpu_capabilities *entry,
>>>>            { /* sentinel */ }
>>>>        };
>>>>        char const *str = "command line option";
>>>> +    bool meltdown_safe;
>>>> +
>>>> +    meltdown_safe = is_midr_in_range_list(read_cpuid_id(),
>>>> kpti_safe_list);
>>>> +
>>>> +    /* Defer to CPU feature registers */
>>>> +    if (has_cpuid_feature(entry, scope))
>>>> +        meltdown_safe = true;
>>>> +
>>>> +    if (!meltdown_safe)
>>>> +        __meltdown_safe = false;
>>>>        /*
>>>>         * For reasons that aren't entirely clear, enabling KPTI on Cavium
>>>> @@ -974,6 +984,11 @@ static bool unmap_kernel_at_el0(const struct
>>>> arm64_cpu_capabilities *entry,
>>>>            __kpti_forced = -1;
>>>>        }
>>>> +    if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
>>>> +        pr_info_once("kernel page table isolation disabled by
>>>> CONFIG\n");
>>>> +        return false;
>>>> +    }
>>>> +
>>>>        /* Forced? */
>>>>        if (__kpti_forced) {
>>>>            pr_info_once("kernel page table isolation forced %s by %s\n",
>>>> @@ -985,14 +1000,10 @@ static bool unmap_kernel_at_el0(const struct
>>>> arm64_cpu_capabilities *entry,
>>>>        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>>>>            return kaslr_offset() > 0;
>>>> -    /* Don't force KPTI for CPUs that are not vulnerable */
>>>> -    if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
>>>> -        return false;
>>>> -
>>>> -    /* Defer to CPU feature registers */
>>>> -    return !has_cpuid_feature(entry, scope);
>>>> +    return !meltdown_safe;
>>>>    }
>>>> +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>>>    static void
>>>>    kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>>>>    {
>>>> @@ -1022,6 +1033,13 @@ kpti_install_ng_mappings(const struct
>>>> arm64_cpu_capabilities *__unused)
>>>>        return;
>>>>    }
>>>> +#else
>>>> +static void
>>>> +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>>>> +{
>>>> +}
>>>> +#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
>>>> +
>>>>    static int __init parse_kpti(char *str)
>>>>    {
>>>> @@ -1035,7 +1053,6 @@ static int __init parse_kpti(char *str)
>>>>        return 0;
>>>>    }
>>>>    early_param("kpti", parse_kpti);
>>>> -#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
>>>>    #ifdef CONFIG_ARM64_HW_AFDBM
>>>>    static inline void __cpu_enable_hw_dbm(void)
>>>> @@ -1286,7 +1303,6 @@ static const struct arm64_cpu_capabilities
>>>> arm64_features[] = {
>>>>            .field_pos = ID_AA64PFR0_EL0_SHIFT,
>>>>            .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
>>>>        },
>>>> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>>>        {
>>>>            .desc = "Kernel page table isolation (KPTI)",
>>>>            .capability = ARM64_UNMAP_KERNEL_AT_EL0,
>>>> @@ -1302,7 +1318,6 @@ static const struct arm64_cpu_capabilities
>>>> arm64_features[] = {
>>>>            .matches = unmap_kernel_at_el0,
>>>>            .cpu_enable = kpti_install_ng_mappings,
>>>>        },
>>>> -#endif
>>>>        {
>>>>            /* FP/SIMD is not implemented */
>>>>            .capability = ARM64_HAS_NO_FPSIMD,
>>>> @@ -2063,3 +2078,15 @@ static int __init enable_mrs_emulation(void)
>>>>    }
>>>>    core_initcall(enable_mrs_emulation);
>>>> +
>>>> +ssize_t cpu_show_meltdown(struct device *dev, struct
>>>> device_attribute *attr,
>>>> +        char *buf)
>>>> +{
>>>> +    if (arm64_kernel_unmapped_at_el0())
>>>> +        return sprintf(buf, "Mitigation: KPTI\n");
>>>> +
>>>> +    if (__meltdown_safe)
>>>> +        return sprintf(buf, "Not affected\n");
>>>
>>> Shall those two checks be swapped? So it doesn't report about a KPTI
>>> mitigation if the CPU is safe, but we enable KPTI because of KASLR
>>> having enabled it? Or is that a different knob?
>>
>> Hmmm, I think having it this way reflects the fact that the machine is
>> mitigated independent of whether it needed it. The force on case is similar.
>> The machine may not have needed the mitigation but it was forced on.
> 
> So is this patchset about showing vulnerabilities _and_ mitigations or
> just one of them?
> 

Well, I don't think there is a way to express a mitigated but not 
vulnerable state in the current ABI. This set is mostly just to bring us 
in line with the current ABI expectations.
Catalin Marinas March 1, 2019, 5:15 p.m. UTC | #6
On Fri, Mar 01, 2019 at 10:53:50AM -0600, Jeremy Linton wrote:
> On 3/1/19 10:20 AM, Catalin Marinas wrote:
> > On Fri, Mar 01, 2019 at 10:12:09AM -0600, Jeremy Linton wrote:
> > > On 3/1/19 1:11 AM, Andre Przywara wrote:
> > > > On 2/26/19 7:05 PM, Jeremy Linton wrote:
> > > > > +ssize_t cpu_show_meltdown(struct device *dev, struct
> > > > > device_attribute *attr,
> > > > > +        char *buf)
> > > > > +{
> > > > > +    if (arm64_kernel_unmapped_at_el0())
> > > > > +        return sprintf(buf, "Mitigation: KPTI\n");
> > > > > +
> > > > > +    if (__meltdown_safe)
> > > > > +        return sprintf(buf, "Not affected\n");
> > > > 
> > > > Shall those two checks be swapped? So it doesn't report about a KPTI
> > > > mitigation if the CPU is safe, but we enable KPTI because of KASLR
> > > > having enabled it? Or is that a different knob?
> > > 
> > > Hmmm, I think having it this way reflects the fact that the machine is
> > > mitigated independent of whether it needed it. The force on case is similar.
> > > The machine may not have needed the mitigation but it was forced on.
> > 
> > So is this patchset about showing vulnerabilities _and_ mitigations or
> > just one of them?
> 
> Well, I don't think there is a way to express a mitigated but not vulnerable
> state in the current ABI. This set is mostly just to bring us in line with
> the current ABI expectations.

Looking at the ABI doc, it states:

	"Not affected"	  CPU is not affected by the vulnerability
	"Vulnerable"	  CPU is affected and no mitigation in effect
	"Mitigation: $M"  CPU is affected and mitigation $M is in effect

So, yes, we don't have mitigated but not vulnerable. Therefore I think
we should stick to "not affected" and swap the lines above as per
Andre's comment. This file is about Meltdown vulnerability and
mitigation, not KASLR hardening.
André Przywara March 1, 2019, 5:30 p.m. UTC | #7
Hi,

On 3/1/19 10:53 AM, Jeremy Linton wrote:
> Hi,
> 
> On 3/1/19 10:20 AM, Catalin Marinas wrote:
>> On Fri, Mar 01, 2019 at 10:12:09AM -0600, Jeremy Linton wrote:
>>> On 3/1/19 1:11 AM, Andre Przywara wrote:
>>>> On 2/26/19 7:05 PM, Jeremy Linton wrote:
>>>>> Display the mitigation status if active, otherwise
>>>>> assume the cpu is safe unless it doesn't have CSV3
>>>>> and isn't in our whitelist.
>>>>>
>>>>> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
>>>>> ---
>>>>>    arch/arm64/kernel/cpufeature.c | 47 
>>>>> ++++++++++++++++++++++++++--------
>>>>>    1 file changed, 37 insertions(+), 10 deletions(-)
>>>>>
>>>>> diff --git a/arch/arm64/kernel/cpufeature.c
>>>>> b/arch/arm64/kernel/cpufeature.c
>>>>> index f6d84e2c92fe..d31bd770acba 100644
>>>>> --- a/arch/arm64/kernel/cpufeature.c
>>>>> +++ b/arch/arm64/kernel/cpufeature.c
>>>>> @@ -944,7 +944,7 @@ has_useable_cnp(const struct
>>>>> arm64_cpu_capabilities *entry, int scope)
>>>>>        return has_cpuid_feature(entry, scope);
>>>>>    }
>>>>> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>>>> +static bool __meltdown_safe = true;
>>>>>    static int __kpti_forced; /* 0: not forced, >0: forced on, <0:
>>>>> forced off */
>>>>>    static bool unmap_kernel_at_el0(const struct
>>>>> arm64_cpu_capabilities *entry,
>>>>> @@ -963,6 +963,16 @@ static bool unmap_kernel_at_el0(const struct
>>>>> arm64_cpu_capabilities *entry,
>>>>>            { /* sentinel */ }
>>>>>        };
>>>>>        char const *str = "command line option";
>>>>> +    bool meltdown_safe;
>>>>> +
>>>>> +    meltdown_safe = is_midr_in_range_list(read_cpuid_id(),
>>>>> kpti_safe_list);
>>>>> +
>>>>> +    /* Defer to CPU feature registers */
>>>>> +    if (has_cpuid_feature(entry, scope))
>>>>> +        meltdown_safe = true;
>>>>> +
>>>>> +    if (!meltdown_safe)
>>>>> +        __meltdown_safe = false;
>>>>>        /*
>>>>>         * For reasons that aren't entirely clear, enabling KPTI on 
>>>>> Cavium
>>>>> @@ -974,6 +984,11 @@ static bool unmap_kernel_at_el0(const struct
>>>>> arm64_cpu_capabilities *entry,
>>>>>            __kpti_forced = -1;
>>>>>        }
>>>>> +    if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
>>>>> +        pr_info_once("kernel page table isolation disabled by
>>>>> CONFIG\n");
>>>>> +        return false;
>>>>> +    }
>>>>> +
>>>>>        /* Forced? */
>>>>>        if (__kpti_forced) {
>>>>>            pr_info_once("kernel page table isolation forced %s by 
>>>>> %s\n",
>>>>> @@ -985,14 +1000,10 @@ static bool unmap_kernel_at_el0(const struct
>>>>> arm64_cpu_capabilities *entry,
>>>>>        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>>>>>            return kaslr_offset() > 0;
>>>>> -    /* Don't force KPTI for CPUs that are not vulnerable */
>>>>> -    if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
>>>>> -        return false;
>>>>> -
>>>>> -    /* Defer to CPU feature registers */
>>>>> -    return !has_cpuid_feature(entry, scope);
>>>>> +    return !meltdown_safe;
>>>>>    }
>>>>> +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>>>>    static void
>>>>>    kpti_install_ng_mappings(const struct arm64_cpu_capabilities 
>>>>> *__unused)
>>>>>    {
>>>>> @@ -1022,6 +1033,13 @@ kpti_install_ng_mappings(const struct
>>>>> arm64_cpu_capabilities *__unused)
>>>>>        return;
>>>>>    }
>>>>> +#else
>>>>> +static void
>>>>> +kpti_install_ng_mappings(const struct arm64_cpu_capabilities 
>>>>> *__unused)
>>>>> +{
>>>>> +}
>>>>> +#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
>>>>> +
>>>>>    static int __init parse_kpti(char *str)
>>>>>    {
>>>>> @@ -1035,7 +1053,6 @@ static int __init parse_kpti(char *str)
>>>>>        return 0;
>>>>>    }
>>>>>    early_param("kpti", parse_kpti);
>>>>> -#endif    /* CONFIG_UNMAP_KERNEL_AT_EL0 */
>>>>>    #ifdef CONFIG_ARM64_HW_AFDBM
>>>>>    static inline void __cpu_enable_hw_dbm(void)
>>>>> @@ -1286,7 +1303,6 @@ static const struct arm64_cpu_capabilities
>>>>> arm64_features[] = {
>>>>>            .field_pos = ID_AA64PFR0_EL0_SHIFT,
>>>>>            .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
>>>>>        },
>>>>> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>>>>        {
>>>>>            .desc = "Kernel page table isolation (KPTI)",
>>>>>            .capability = ARM64_UNMAP_KERNEL_AT_EL0,
>>>>> @@ -1302,7 +1318,6 @@ static const struct arm64_cpu_capabilities
>>>>> arm64_features[] = {
>>>>>            .matches = unmap_kernel_at_el0,
>>>>>            .cpu_enable = kpti_install_ng_mappings,
>>>>>        },
>>>>> -#endif
>>>>>        {
>>>>>            /* FP/SIMD is not implemented */
>>>>>            .capability = ARM64_HAS_NO_FPSIMD,
>>>>> @@ -2063,3 +2078,15 @@ static int __init enable_mrs_emulation(void)
>>>>>    }
>>>>>    core_initcall(enable_mrs_emulation);
>>>>> +
>>>>> +ssize_t cpu_show_meltdown(struct device *dev, struct
>>>>> device_attribute *attr,
>>>>> +        char *buf)
>>>>> +{
>>>>> +    if (arm64_kernel_unmapped_at_el0())
>>>>> +        return sprintf(buf, "Mitigation: KPTI\n");
>>>>> +
>>>>> +    if (__meltdown_safe)
>>>>> +        return sprintf(buf, "Not affected\n");
>>>>
>>>> Shall those two checks be swapped? So it doesn't report about a KPTI
>>>> mitigation if the CPU is safe, but we enable KPTI because of KASLR
>>>> having enabled it? Or is that a different knob?
>>>
>>> Hmmm, I think having it this way reflects the fact that the machine is
>>> mitigated independent of whether it needed it. The force on case is 
>>> similar.
>>> The machine may not have needed the mitigation but it was forced on.
>>
>> So is this patchset about showing vulnerabilities _and_ mitigations or
>> just one of them?
>>
> 
> Well, I don't think there is a way to express a mitigated but not 
> vulnerable state in the current ABI. This set is mostly just to bring us 
> in line with the current ABI expectations.

Yeah, from a user's point of view this is probably just bikeshedding, 
because the system is safe either way. If there are good arguments later 
on to prefer "Not affected" over "Mitigated", we can still change it.

Cheers,
Andre.

Patch
diff mbox series

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f6d84e2c92fe..d31bd770acba 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -944,7 +944,7 @@  has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	return has_cpuid_feature(entry, scope);
 }
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -963,6 +963,16 @@  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		{ /* sentinel */ }
 	};
 	char const *str = "command line option";
+	bool meltdown_safe;
+
+	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+	/* Defer to CPU feature registers */
+	if (has_cpuid_feature(entry, scope))
+		meltdown_safe = true;
+
+	if (!meltdown_safe)
+		__meltdown_safe = false;
 
 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -974,6 +984,11 @@  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		__kpti_forced = -1;
 	}
 
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+		pr_info_once("kernel page table isolation disabled by CONFIG\n");
+		return false;
+	}
+
 	/* Forced? */
 	if (__kpti_forced) {
 		pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -985,14 +1000,10 @@  static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
 		return kaslr_offset() > 0;
 
-	/* Don't force KPTI for CPUs that are not vulnerable */
-	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
-		return false;
-
-	/* Defer to CPU feature registers */
-	return !has_cpuid_feature(entry, scope);
+	return !meltdown_safe;
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static void
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1022,6 +1033,13 @@  kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 
 	return;
 }
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 
 static int __init parse_kpti(char *str)
 {
@@ -1035,7 +1053,6 @@  static int __init parse_kpti(char *str)
 	return 0;
 }
 early_param("kpti", parse_kpti);
-#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 static inline void __cpu_enable_hw_dbm(void)
@@ -1286,7 +1303,6 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
 		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 	},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	{
 		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1302,7 +1318,6 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = unmap_kernel_at_el0,
 		.cpu_enable = kpti_install_ng_mappings,
 	},
-#endif
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
@@ -2063,3 +2078,15 @@  static int __init enable_mrs_emulation(void)
 }
 
 core_initcall(enable_mrs_emulation);
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	if (arm64_kernel_unmapped_at_el0())
+		return sprintf(buf, "Mitigation: KPTI\n");
+
+	if (__meltdown_safe)
+		return sprintf(buf, "Not affected\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}