diff mbox series

[v2,02/14] arm64: Detect if in a realm and set RIPAS RAM

Message ID 20240412084213.1733764-3-steven.price@arm.com (mailing list archive)
State New, archived
Headers show
Series arm64: Support for running as a guest in Arm CCA | expand

Commit Message

Steven Price April 12, 2024, 8:42 a.m. UTC
From: Suzuki K Poulose <suzuki.poulose@arm.com>

Detect that the VM is a realm guest by the presence of the RSI
interface.

If in a realm then all memory needs to be marked as RIPAS RAM initially,
the loader may or may not have done this for us. To be sure, iterate over
all RAM and mark it as such. Any failure is fatal as that implies the
RAM regions passed to Linux are incorrect - which would mean failing
later when attempting to access non-existent RAM.

Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Co-developed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
---
 arch/arm64/include/asm/rsi.h      | 46 ++++++++++++++++++++++++
 arch/arm64/include/asm/rsi_cmds.h | 22 ++++++++++++
 arch/arm64/kernel/Makefile        |  3 +-
 arch/arm64/kernel/rsi.c           | 58 +++++++++++++++++++++++++++++++
 arch/arm64/kernel/setup.c         |  3 ++
 arch/arm64/mm/init.c              |  2 ++
 6 files changed, 133 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/include/asm/rsi.h
 create mode 100644 arch/arm64/kernel/rsi.c

Comments

Catalin Marinas May 10, 2024, 5:35 p.m. UTC | #1
On Fri, Apr 12, 2024 at 09:42:01AM +0100, Steven Price wrote:
> diff --git a/arch/arm64/include/asm/rsi.h b/arch/arm64/include/asm/rsi.h
> new file mode 100644
> index 000000000000..3b56aac5dc43
> --- /dev/null
> +++ b/arch/arm64/include/asm/rsi.h
> @@ -0,0 +1,46 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2023 ARM Ltd.

You may want to update the year ;).

> + */
> +
> +#ifndef __ASM_RSI_H_
> +#define __ASM_RSI_H_
> +
> +#include <linux/jump_label.h>
> +#include <asm/rsi_cmds.h>
> +
> +extern struct static_key_false rsi_present;

Nitpick: we tend to use DECLARE_STATIC_KEY_FALSE(), it pairs with
DEFINE_STATIC_KEY_FALSE().

> +void arm64_setup_memory(void);
> +
> +void __init arm64_rsi_init(void);
> +static inline bool is_realm_world(void)
> +{
> +	return static_branch_unlikely(&rsi_present);
> +}
> +
> +static inline void set_memory_range(phys_addr_t start, phys_addr_t end,
> +				    enum ripas state)
> +{
> +	unsigned long ret;
> +	phys_addr_t top;
> +
> +	while (start != end) {
> +		ret = rsi_set_addr_range_state(start, end, state, &top);
> +		BUG_ON(ret);
> +		BUG_ON(top < start);
> +		BUG_ON(top > end);

Are these always fatal? BUG_ON() is frowned upon in general. The
alternative would be returning an error code from the function and maybe
printing a warning here (it seems that some people don't like WARN_ON
either but it's better than BUG_ON; could use a pr_err() instead). Also
if something's wrong with the RSI interface to mess up the return
values, it will be hard to debug just from those BUG_ON().

If there's no chance of continuing beyond the point, add a comment on
why we have a BUG_ON().

> diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
> new file mode 100644
> index 000000000000..1076649ac082
> --- /dev/null
> +++ b/arch/arm64/kernel/rsi.c
> @@ -0,0 +1,58 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2023 ARM Ltd.
> + */
> +
> +#include <linux/jump_label.h>
> +#include <linux/memblock.h>
> +#include <asm/rsi.h>
> +
> +DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
> +EXPORT_SYMBOL(rsi_present);

Does this need to be made available to loadable modules?

> +
> +static bool rsi_version_matches(void)
> +{
> +	unsigned long ver;
> +	unsigned long ret = rsi_get_version(RSI_ABI_VERSION, &ver, NULL);

I wonder whether rsi_get_version() is the right name (I know it was
introduced in the previous patch but the usage is here, hence my
comment). From the RMM spec, this looks more like an
rsi_request_version() to me.

TBH, the RMM spec around versioning doesn't fully make sense to me ;). I
assume people working on it had some good reasons around the lower
revision reporting in case of an error.

> +
> +	if (ret == SMCCC_RET_NOT_SUPPORTED)
> +		return false;
> +
> +	if (ver != RSI_ABI_VERSION) {
> +		pr_err("RME: RSI version %lu.%lu not supported\n",
> +		       RSI_ABI_VERSION_GET_MAJOR(ver),
> +		       RSI_ABI_VERSION_GET_MINOR(ver));
> +		return false;
> +	}

The above check matches what the spec says but wouldn't it be clearer to
just check for ret == RSI_SUCCESS? It saves one having to read the spec
to figure out what lower revision actually means in the spec (not the
actual lowest supported but the highest while still lower than the
requested one _or_ equal to the higher revision if the lower is higher
than the requested one - if any of this makes sense to people ;), I'm
sure I missed some other combinations).

> +
> +	pr_info("RME: Using RSI version %lu.%lu\n",
> +		RSI_ABI_VERSION_GET_MAJOR(ver),
> +		RSI_ABI_VERSION_GET_MINOR(ver));
> +
> +	return true;
> +}
> +
> +void arm64_setup_memory(void)

I would give this function a better name, something to resemble the RSI
setup. Similarly for others like set_memory_range_protected/shared().
Some of the functions have 'rsi' in the name like arm64_rsi_init() but
others don't and at a first look they'd seem like some generic memory
setup on arm64, not RSI-specific.

> +{
> +	u64 i;
> +	phys_addr_t start, end;
> +
> +	if (!static_branch_unlikely(&rsi_present))
> +		return;

We have an accessor for rsi_present - is_realm_world(). Why not use
that?

> +
> +	/*
> +	 * Iterate over the available memory ranges
> +	 * and convert the state to protected memory.
> +	 */
> +	for_each_mem_range(i, &start, &end) {
> +		set_memory_range_protected(start, end);
> +	}
> +}
> +
> +void __init arm64_rsi_init(void)
> +{
> +	if (!rsi_version_matches())
> +		return;
> +
> +	static_branch_enable(&rsi_present);
> +}
> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
> index 65a052bf741f..a4bd97e74704 100644
> --- a/arch/arm64/kernel/setup.c
> +++ b/arch/arm64/kernel/setup.c
> @@ -43,6 +43,7 @@
>  #include <asm/cpu_ops.h>
>  #include <asm/kasan.h>
>  #include <asm/numa.h>
> +#include <asm/rsi.h>
>  #include <asm/scs.h>
>  #include <asm/sections.h>
>  #include <asm/setup.h>
> @@ -293,6 +294,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
>  	 * cpufeature code and early parameters.
>  	 */
>  	jump_label_init();
> +	/* Init RSI after jump_labels are active */
> +	arm64_rsi_init();
>  	parse_early_param();

Does it need to be this early? It's fine for now but I wonder whether we
may have some early parameter at some point that could influence what we
do in the arm64_rsi_init(). I'd move it after or maybe even as part of
the arm64_setup_memory(), though I haven't read the following patches if
they update this function.

>  
>  	dynamic_scs_init();
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 03efd86dce0a..786fd6ce5f17 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -40,6 +40,7 @@
>  #include <asm/kvm_host.h>
>  #include <asm/memory.h>
>  #include <asm/numa.h>
> +#include <asm/rsi.h>
>  #include <asm/sections.h>
>  #include <asm/setup.h>
>  #include <linux/sizes.h>
> @@ -313,6 +314,7 @@ void __init arm64_memblock_init(void)
>  	early_init_fdt_scan_reserved_mem();
>  
>  	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
> +	arm64_setup_memory();
>  }

This feels like a random placement. This function is about memblock
initialisation. You might as well put it in paging_init(), it could make
more sense there. But I'd rather keep it in setup_arch() immediately
after arm64_memblock_init().
Suzuki K Poulose May 14, 2024, 10:18 a.m. UTC | #2
Hi Catalin,

On 10/05/2024 18:35, Catalin Marinas wrote:
> On Fri, Apr 12, 2024 at 09:42:01AM +0100, Steven Price wrote:
>> diff --git a/arch/arm64/include/asm/rsi.h b/arch/arm64/include/asm/rsi.h
>> new file mode 100644
>> index 000000000000..3b56aac5dc43
>> --- /dev/null
>> +++ b/arch/arm64/include/asm/rsi.h
>> @@ -0,0 +1,46 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +/*
>> + * Copyright (C) 2023 ARM Ltd.
> 
> You may want to update the year ;).

This was written in 2023 ;-), hasn't changed much since, hence the year.

> 
>> + */
>> +
>> +#ifndef __ASM_RSI_H_
>> +#define __ASM_RSI_H_
>> +
>> +#include <linux/jump_label.h>
>> +#include <asm/rsi_cmds.h>
>> +
>> +extern struct static_key_false rsi_present;
> 
> Nitpick: we tend to use DECLARE_STATIC_KEY_FALSE(), it pairs with
> DEFINE_STATIC_KEY_FALSE().

Agree

> 
>> +void arm64_setup_memory(void);
>> +
>> +void __init arm64_rsi_init(void);
>> +static inline bool is_realm_world(void)
>> +{
>> +	return static_branch_unlikely(&rsi_present);
>> +}
>> +
>> +static inline void set_memory_range(phys_addr_t start, phys_addr_t end,
>> +				    enum ripas state)
>> +{
>> +	unsigned long ret;
>> +	phys_addr_t top;
>> +
>> +	while (start != end) {
>> +		ret = rsi_set_addr_range_state(start, end, state, &top);
>> +		BUG_ON(ret);
>> +		BUG_ON(top < start);
>> +		BUG_ON(top > end);
> 
> Are these always fatal? BUG_ON() is frowned upon in general. The
> alternative would be returning an error code from the function and maybe
> printing a warning here (it seems that some people don't like WARN_ON
> either but it's better than BUG_ON; could use a pr_err() instead). Also
> if something's wrong with the RSI interface to mess up the return
> values, it will be hard to debug just from those BUG_ON().

The BUG_ON was put in to stop the guest from running, when it detects
that it cannot transition a given IPA to a desired state. This could
happen mainly if the Host described some address to the Guest and has
backed out from the promise.

However, thinking about this a little deeper, we could relax this a bit
and leave it to the "caller" to take an action. e.g.

1. If this fails for "Main memory" -> RIPAS_RAM transition, it is fatal.

2. If we are transitioning some random IPA to RIPAS_EMPTY (for setting 
up in-realm MMIO, which we do not support yet), it may be dealt with.

We could have other cases in the future where we support trusted I/O,
and a failure to transition is not end of the world, but simply refusing
to use a device for e.g.

That said, the current uses in the kernel are always fatal. So, the 
BUG_ON is justified as it stands. Happy to change either way.

> 
> If there's no chance of continuing beyond the point, add a comment on
> why we have a BUG_ON().
> 
>> diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
>> new file mode 100644
>> index 000000000000..1076649ac082
>> --- /dev/null
>> +++ b/arch/arm64/kernel/rsi.c
>> @@ -0,0 +1,58 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/*
>> + * Copyright (C) 2023 ARM Ltd.
>> + */
>> +
>> +#include <linux/jump_label.h>
>> +#include <linux/memblock.h>
>> +#include <asm/rsi.h>
>> +
>> +DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
>> +EXPORT_SYMBOL(rsi_present);
> 
> Does this need to be made available to loadable modules?

Yes, for e.g., TSM_CONFIG report for attestation framework.
Patch 14 in the list.

> 
>> +
>> +static bool rsi_version_matches(void)
>> +{
>> +	unsigned long ver;
>> +	unsigned long ret = rsi_get_version(RSI_ABI_VERSION, &ver, NULL);
> 
> I wonder whether rsi_get_version() is the right name (I know it was
> introduced in the previous patch but the usage is here, hence my
> comment). From the RMM spec, this looks more like an
> rsi_request_version() to me.

Agree.

> 
> TBH, the RMM spec around versioning doesn't fully make sense to me ;). I
> assume people working on it had some good reasons around the lower
> revision reporting in case of an error.

;-)

> 
>> +
>> +	if (ret == SMCCC_RET_NOT_SUPPORTED)
>> +		return false;
>> +
>> +	if (ver != RSI_ABI_VERSION) {
>> +		pr_err("RME: RSI version %lu.%lu not supported\n",
>> +		       RSI_ABI_VERSION_GET_MAJOR(ver),
>> +		       RSI_ABI_VERSION_GET_MINOR(ver));
>> +		return false;
>> +	}
> 
> The above check matches what the spec says but wouldn't it be clearer to
> just check for ret == RSI_SUCCESS? It saves one having to read the spec

Ack. I guess this was never changed since the spec update. I have 
requested a similar change for RMI_ABI_VERSION checks.

> to figure out what lower revision actually means in the spec (not the
> actual lowest supported but the highest while still lower than the
> requested one _or_ equal to the higher revision if the lower is higher
> than the requested one - if any of this makes sense to people ;), I'm
> sure I missed some other combinations).
> 
>> +
>> +	pr_info("RME: Using RSI version %lu.%lu\n",
>> +		RSI_ABI_VERSION_GET_MAJOR(ver),
>> +		RSI_ABI_VERSION_GET_MINOR(ver));
>> +
>> +	return true;
>> +}
>> +
>> +void arm64_setup_memory(void)
> 
> I would give this function a better name, something to resemble the RSI
> setup. Similarly for others like set_memory_range_protected/shared().
> Some of the functions have 'rsi' in the name like arm64_rsi_init() but
> others don't and at a first look they'd seem like some generic memory
> setup on arm64, not RSI-specific.

Ack. arm64_rsi_setup_memory() ? I agree, we should "rsi" fy the names.

> 
>> +{
>> +	u64 i;
>> +	phys_addr_t start, end;
>> +
>> +	if (!static_branch_unlikely(&rsi_present))
>> +		return;
> 
> We have an accessor for rsi_present - is_realm_world(). Why not use
> that?
> 
>> +
>> +	/*
>> +	 * Iterate over the available memory ranges
>> +	 * and convert the state to protected memory.
>> +	 */
>> +	for_each_mem_range(i, &start, &end) {
>> +		set_memory_range_protected(start, end);
>> +	}
>> +}
>> +
>> +void __init arm64_rsi_init(void)
>> +{
>> +	if (!rsi_version_matches())
>> +		return;
>> +
>> +	static_branch_enable(&rsi_present);
>> +}
>> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
>> index 65a052bf741f..a4bd97e74704 100644
>> --- a/arch/arm64/kernel/setup.c
>> +++ b/arch/arm64/kernel/setup.c
>> @@ -43,6 +43,7 @@
>>   #include <asm/cpu_ops.h>
>>   #include <asm/kasan.h>
>>   #include <asm/numa.h>
>> +#include <asm/rsi.h>
>>   #include <asm/scs.h>
>>   #include <asm/sections.h>
>>   #include <asm/setup.h>
>> @@ -293,6 +294,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
>>   	 * cpufeature code and early parameters.
>>   	 */
>>   	jump_label_init();
>> +	/* Init RSI after jump_labels are active */
>> +	arm64_rsi_init();
>>   	parse_early_param();
> 
> Does it need to be this early? It's fine for now but I wonder whether we
> may have some early parameter at some point that could influence what we
> do in the arm64_rsi_init(). I'd move it after or maybe even as part of
> the arm64_setup_memory(), though I haven't read the following patches if
> they update this function.

We must do this before we set up the "earlycon", so that the console
is accessed using the shared alias, and that happens via parse_early_param() :-(.


> 
>>   
>>   	dynamic_scs_init();
>> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
>> index 03efd86dce0a..786fd6ce5f17 100644
>> --- a/arch/arm64/mm/init.c
>> +++ b/arch/arm64/mm/init.c
>> @@ -40,6 +40,7 @@
>>   #include <asm/kvm_host.h>
>>   #include <asm/memory.h>
>>   #include <asm/numa.h>
>> +#include <asm/rsi.h>
>>   #include <asm/sections.h>
>>   #include <asm/setup.h>
>>   #include <linux/sizes.h>
>> @@ -313,6 +314,7 @@ void __init arm64_memblock_init(void)
>>   	early_init_fdt_scan_reserved_mem();
>>   
>>   	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
>> +	arm64_setup_memory();
>>   }
> 
> This feels like a random placement. This function is about memblock
> initialisation. You might as well put it in paging_init(), it could make
> more sense there. But I'd rather keep it in setup_arch() immediately
> after arm64_memblock_init().

Makes sense. This was done to make sure we process all the memory
regions, as soon as they are identified.

Suzuki


>
Steven Price May 15, 2024, 3:03 p.m. UTC | #3
On 10/05/2024 18:35, Catalin Marinas wrote:
> On Fri, Apr 12, 2024 at 09:42:01AM +0100, Steven Price wrote:
>> diff --git a/arch/arm64/include/asm/rsi.h b/arch/arm64/include/asm/rsi.h
>> new file mode 100644
>> index 000000000000..3b56aac5dc43
>> --- /dev/null
>> +++ b/arch/arm64/include/asm/rsi.h
>> @@ -0,0 +1,46 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +/*
>> + * Copyright (C) 2023 ARM Ltd.
> 
> You may want to update the year ;).

Noted! ;)

>> + */
>> +
>> +#ifndef __ASM_RSI_H_
>> +#define __ASM_RSI_H_
>> +
>> +#include <linux/jump_label.h>
>> +#include <asm/rsi_cmds.h>
>> +
>> +extern struct static_key_false rsi_present;
> 
> Nitpick: we tend to use DECLARE_STATIC_KEY_FALSE(), it pairs with
> DEFINE_STATIC_KEY_FALSE().

Makes sense.

>> +void arm64_setup_memory(void);
>> +
>> +void __init arm64_rsi_init(void);
>> +static inline bool is_realm_world(void)
>> +{
>> +	return static_branch_unlikely(&rsi_present);
>> +}
>> +
>> +static inline void set_memory_range(phys_addr_t start, phys_addr_t end,
>> +				    enum ripas state)
>> +{
>> +	unsigned long ret;
>> +	phys_addr_t top;
>> +
>> +	while (start != end) {
>> +		ret = rsi_set_addr_range_state(start, end, state, &top);
>> +		BUG_ON(ret);
>> +		BUG_ON(top < start);
>> +		BUG_ON(top > end);
> 
> Are these always fatal? BUG_ON() is frowned upon in general. The
> alternative would be returning an error code from the function and maybe
> printing a warning here (it seems that some people don't like WARN_ON
> either but it's better than BUG_ON; could use a pr_err() instead). Also
> if something's wrong with the RSI interface to mess up the return
> values, it will be hard to debug just from those BUG_ON().
> 
> If there's no chance of continuing beyond the point, add a comment on
> why we have a BUG_ON().

I think you're right, these shouldn't be (immediately) fatal - if this
is a change happening at runtime it might be possible to handle it.
However, at boot time it makes no sense to try to continue (which is
what I was originally looking at when this was written) as the
environment isn't what the guest is expecting, and continuing will
either lead to a later crash, attestation failure, or potential exploit
if the guest can somehow be tricked to use the shared mapping rather
than the protected one.

I'll update the set_memory_range...() functions to return an error code
and push the boot BUG_ON up the chain (with a comment). But this is
still in the "should never happen" situation so I'll leave a WARN_ON here.

>> diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
>> new file mode 100644
>> index 000000000000..1076649ac082
>> --- /dev/null
>> +++ b/arch/arm64/kernel/rsi.c
>> @@ -0,0 +1,58 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/*
>> + * Copyright (C) 2023 ARM Ltd.
>> + */
>> +
>> +#include <linux/jump_label.h>
>> +#include <linux/memblock.h>
>> +#include <asm/rsi.h>
>> +
>> +DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
>> +EXPORT_SYMBOL(rsi_present);
> 
> Does this need to be made available to loadable modules?

It's used by drivers/virt/coco/arm-cca-guest/arm-cca-guest.c (through
is_realm_world()) which can be built as a module - see patch 14.

>> +
>> +static bool rsi_version_matches(void)
>> +{
>> +	unsigned long ver;
>> +	unsigned long ret = rsi_get_version(RSI_ABI_VERSION, &ver, NULL);
> 
> I wonder whether rsi_get_version() is the right name (I know it was
> introduced in the previous patch but the usage is here, hence my
> comment). From the RMM spec, this looks more like an
> rsi_request_version() to me.
> 
> TBH, the RMM spec around versioning doesn't fully make sense to me ;). I
> assume people working on it had some good reasons around the lower
> revision reporting in case of an error.

There's been a fair bit of discussion around versioning. Currently the
RMM implementation is very much a "get version" function. The issue was
what happens if in the future there is an incompatible RMM spec (i.e.
v2.0). The intention is that old OSes will provide the older version
number and give the RMM the opportunity to 'emulate' it. Equally where
the OS supports multiple versions then there's a need to negotiate a
commonly accepted version.

In terms of naming - mostly I've tried to just follow the spec, but the
naming in the spec isn't great. Calling the function rsi_version() would
be confusing so a verb is needed, but I'm not hung up on the verb. I'll
change it to rsi_request_version().

>> +
>> +	if (ret == SMCCC_RET_NOT_SUPPORTED)
>> +		return false;
>> +
>> +	if (ver != RSI_ABI_VERSION) {
>> +		pr_err("RME: RSI version %lu.%lu not supported\n",
>> +		       RSI_ABI_VERSION_GET_MAJOR(ver),
>> +		       RSI_ABI_VERSION_GET_MINOR(ver));
>> +		return false;
>> +	}
> 
> The above check matches what the spec says but wouldn't it be clearer to
> just check for ret == RSI_SUCCESS? It saves one having to read the spec
> to figure out what lower revision actually means in the spec (not the
> actual lowest supported but the highest while still lower than the
> requested one _or_ equal to the higher revision if the lower is higher
> than the requested one - if any of this makes sense to people ;), I'm
> sure I missed some other combinations).

Indeed - I got similar feedback on the RMI side. The spec evolved and I
forgot to update it. It should be sufficient (for now) to just look for
RSI_SUCCESS. Only when we start supporting multiple versions (on the
Linux side) do we need to look at the returned version numbers.

>> +
>> +	pr_info("RME: Using RSI version %lu.%lu\n",
>> +		RSI_ABI_VERSION_GET_MAJOR(ver),
>> +		RSI_ABI_VERSION_GET_MINOR(ver));
>> +
>> +	return true;
>> +}
>> +
>> +void arm64_setup_memory(void)
> 
> I would give this function a better name, something to resemble the RSI
> setup. Similarly for others like set_memory_range_protected/shared().
> Some of the functions have 'rsi' in the name like arm64_rsi_init() but
> others don't and at a first look they'd seem like some generic memory
> setup on arm64, not RSI-specific.

Ack.

>> +{
>> +	u64 i;
>> +	phys_addr_t start, end;
>> +
>> +	if (!static_branch_unlikely(&rsi_present))
>> +		return;
> 
> We have an accessor for rsi_present - is_realm_world(). Why not use
> that?

Will change.

>> +
>> +	/*
>> +	 * Iterate over the available memory ranges
>> +	 * and convert the state to protected memory.
>> +	 */
>> +	for_each_mem_range(i, &start, &end) {
>> +		set_memory_range_protected(start, end);
>> +	}
>> +}
>> +
>> +void __init arm64_rsi_init(void)
>> +{
>> +	if (!rsi_version_matches())
>> +		return;
>> +
>> +	static_branch_enable(&rsi_present);
>> +}
>> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
>> index 65a052bf741f..a4bd97e74704 100644
>> --- a/arch/arm64/kernel/setup.c
>> +++ b/arch/arm64/kernel/setup.c
>> @@ -43,6 +43,7 @@
>>  #include <asm/cpu_ops.h>
>>  #include <asm/kasan.h>
>>  #include <asm/numa.h>
>> +#include <asm/rsi.h>
>>  #include <asm/scs.h>
>>  #include <asm/sections.h>
>>  #include <asm/setup.h>
>> @@ -293,6 +294,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
>>  	 * cpufeature code and early parameters.
>>  	 */
>>  	jump_label_init();
>> +	/* Init RSI after jump_labels are active */
>> +	arm64_rsi_init();
>>  	parse_early_param();
> 
> Does it need to be this early? It's fine for now but I wonder whether we
> may have some early parameter at some point that could influence what we
> do in the arm64_rsi_init(). I'd move it after or maybe even as part of
> the arm64_setup_memory(), though I haven't read the following patches if
> they update this function.

As Suzuki said - it's needed for "earlycon" to work - I'll put a comment
in explaining.

>>  
>>  	dynamic_scs_init();
>> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
>> index 03efd86dce0a..786fd6ce5f17 100644
>> --- a/arch/arm64/mm/init.c
>> +++ b/arch/arm64/mm/init.c
>> @@ -40,6 +40,7 @@
>>  #include <asm/kvm_host.h>
>>  #include <asm/memory.h>
>>  #include <asm/numa.h>
>> +#include <asm/rsi.h>
>>  #include <asm/sections.h>
>>  #include <asm/setup.h>
>>  #include <linux/sizes.h>
>> @@ -313,6 +314,7 @@ void __init arm64_memblock_init(void)
>>  	early_init_fdt_scan_reserved_mem();
>>  
>>  	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
>> +	arm64_setup_memory();
>>  }
> 
> This feels like a random placement. This function is about memblock
> initialisation. You might as well put it in paging_init(), it could make
> more sense there. But I'd rather keep it in setup_arch() immediately
> after arm64_memblock_init().
> 

Will move.

Thanks,

Steve
Catalin Marinas May 16, 2024, 2:32 p.m. UTC | #4
On Tue, May 14, 2024 at 11:18:13AM +0100, Suzuki K Poulose wrote:
> On 10/05/2024 18:35, Catalin Marinas wrote:
> > On Fri, Apr 12, 2024 at 09:42:01AM +0100, Steven Price wrote:
> > > +void arm64_setup_memory(void)
> > 
> > I would give this function a better name, something to resemble the RSI
> > setup. Similarly for others like set_memory_range_protected/shared().
> > Some of the functions have 'rsi' in the name like arm64_rsi_init() but
> > others don't and at a first look they'd seem like some generic memory
> > setup on arm64, not RSI-specific.
> 
> Ack. arm64_rsi_setup_memory() ? I agree, we should "rsi" fy the names.

This should work. We also have rsi_*() functions without any 'arm64' but
those are strictly about communicating with the RMM, so that's fine.

> > > @@ -293,6 +294,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
> > >   	 * cpufeature code and early parameters.
> > >   	 */
> > >   	jump_label_init();
> > > +	/* Init RSI after jump_labels are active */
> > > +	arm64_rsi_init();
> > >   	parse_early_param();
> > 
> > Does it need to be this early? It's fine for now but I wonder whether we
> > may have some early parameter at some point that could influence what we
> > do in the arm64_rsi_init(). I'd move it after or maybe even as part of
> > the arm64_setup_memory(), though I haven't read the following patches if
> > they update this function.
> 
> We must do this before we setup the "earlycon", so that the console
> is accessed using shared alias and that happens via parse_early_param() :-(.

Ah, ok, makes sense.
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/rsi.h b/arch/arm64/include/asm/rsi.h
new file mode 100644
index 000000000000..3b56aac5dc43
--- /dev/null
+++ b/arch/arm64/include/asm/rsi.h
@@ -0,0 +1,46 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#ifndef __ASM_RSI_H_
+#define __ASM_RSI_H_
+
+#include <linux/jump_label.h>
+#include <asm/rsi_cmds.h>
+
+extern struct static_key_false rsi_present;
+
+void arm64_setup_memory(void);
+
+void __init arm64_rsi_init(void);
+static inline bool is_realm_world(void)
+{
+	return static_branch_unlikely(&rsi_present);
+}
+
+static inline void set_memory_range(phys_addr_t start, phys_addr_t end,
+				    enum ripas state)
+{
+	unsigned long ret;
+	phys_addr_t top;
+
+	while (start != end) {
+		ret = rsi_set_addr_range_state(start, end, state, &top);
+		BUG_ON(ret);
+		BUG_ON(top < start);
+		BUG_ON(top > end);
+		start = top;
+	}
+}
+
+static inline void set_memory_range_protected(phys_addr_t start, phys_addr_t end)
+{
+	set_memory_range(start, end, RSI_RIPAS_RAM);
+}
+
+static inline void set_memory_range_shared(phys_addr_t start, phys_addr_t end)
+{
+	set_memory_range(start, end, RSI_RIPAS_EMPTY);
+}
+#endif
diff --git a/arch/arm64/include/asm/rsi_cmds.h b/arch/arm64/include/asm/rsi_cmds.h
index 458fb58c4251..b4cbeafa2f41 100644
--- a/arch/arm64/include/asm/rsi_cmds.h
+++ b/arch/arm64/include/asm/rsi_cmds.h
@@ -10,6 +10,11 @@ 
 
 #include <asm/rsi_smc.h>
 
+enum ripas {
+	RSI_RIPAS_EMPTY,
+	RSI_RIPAS_RAM,
+};
+
 static inline void invoke_rsi_fn_smc_with_res(unsigned long function_id,
 					      unsigned long arg0,
 					      unsigned long arg1,
@@ -44,4 +49,21 @@  static inline unsigned long rsi_get_realm_config(struct realm_config *cfg)
 	return res.a0;
 }
 
+static inline unsigned long rsi_set_addr_range_state(phys_addr_t start,
+						     phys_addr_t end,
+						     enum ripas state,
+						     phys_addr_t *top)
+{
+	struct arm_smccc_res res;
+
+	invoke_rsi_fn_smc_with_res(SMC_RSI_IPA_STATE_SET,
+				   start, end, state, RSI_NO_CHANGE_DESTROYED,
+				   &res);
+
+	if (top)
+		*top = res.a1;
+
+	return res.a0;
+}
+
 #endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 763824963ed1..a483b916ed11 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -33,7 +33,8 @@  obj-y			:= debug-monitors.o entry.o irq.o fpsimd.o		\
 			   return_address.o cpuinfo.o cpu_errata.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
-			   syscall.o proton-pack.o idle.o patching.o pi/
+			   syscall.o proton-pack.o idle.o patching.o pi/	\
+			   rsi.o
 
 obj-$(CONFIG_COMPAT)			+= sys32.o signal32.o			\
 					   sys_compat.o
diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
new file mode 100644
index 000000000000..1076649ac082
--- /dev/null
+++ b/arch/arm64/kernel/rsi.c
@@ -0,0 +1,58 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#include <linux/jump_label.h>
+#include <linux/memblock.h>
+#include <asm/rsi.h>
+
+DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
+EXPORT_SYMBOL(rsi_present);
+
+static bool rsi_version_matches(void)
+{
+	unsigned long ver;
+	unsigned long ret = rsi_get_version(RSI_ABI_VERSION, &ver, NULL);
+
+	if (ret == SMCCC_RET_NOT_SUPPORTED)
+		return false;
+
+	if (ver != RSI_ABI_VERSION) {
+		pr_err("RME: RSI version %lu.%lu not supported\n",
+		       RSI_ABI_VERSION_GET_MAJOR(ver),
+		       RSI_ABI_VERSION_GET_MINOR(ver));
+		return false;
+	}
+
+	pr_info("RME: Using RSI version %lu.%lu\n",
+		RSI_ABI_VERSION_GET_MAJOR(ver),
+		RSI_ABI_VERSION_GET_MINOR(ver));
+
+	return true;
+}
+
+void arm64_setup_memory(void)
+{
+	u64 i;
+	phys_addr_t start, end;
+
+	if (!static_branch_unlikely(&rsi_present))
+		return;
+
+	/*
+	 * Iterate over the available memory ranges
+	 * and convert the state to protected memory.
+	 */
+	for_each_mem_range(i, &start, &end) {
+		set_memory_range_protected(start, end);
+	}
+}
+
+void __init arm64_rsi_init(void)
+{
+	if (!rsi_version_matches())
+		return;
+
+	static_branch_enable(&rsi_present);
+}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 65a052bf741f..a4bd97e74704 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -43,6 +43,7 @@ 
 #include <asm/cpu_ops.h>
 #include <asm/kasan.h>
 #include <asm/numa.h>
+#include <asm/rsi.h>
 #include <asm/scs.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
@@ -293,6 +294,8 @@  void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 * cpufeature code and early parameters.
 	 */
 	jump_label_init();
+	/* Init RSI after jump_labels are active */
+	arm64_rsi_init();
 	parse_early_param();
 
 	dynamic_scs_init();
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 03efd86dce0a..786fd6ce5f17 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -40,6 +40,7 @@ 
 #include <asm/kvm_host.h>
 #include <asm/memory.h>
 #include <asm/numa.h>
+#include <asm/rsi.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <linux/sizes.h>
@@ -313,6 +314,7 @@  void __init arm64_memblock_init(void)
 	early_init_fdt_scan_reserved_mem();
 
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+	arm64_setup_memory();
 }
 
 void __init bootmem_init(void)