diff mbox

[RFC,4/4] arm: tee: add basic OP-TEE mediator

Message ID 1507748484-16871-5-git-send-email-volodymyr_babchuk@epam.com (mailing list archive)
State New, archived
Headers show

Commit Message

Volodymyr Babchuk Oct. 11, 2017, 7:01 p.m. UTC
Add basic OP-TEE mediator as an example how TEE mediator framework
works.

Currently it supports only calls from Dom0. Calls from other guests
will be declined. It maps OP-TEE static shared memory region into
Dom0 address space, so Dom0 is the only domain which can work with
older versions of OP-TEE.

Also it alters SMC requests by adding domain id to request. OP-TEE
can use this information to track requesters.

Albeit being in early development stages, this mediator already can
be used on systems where only Dom0 interacts with OP-TEE.

It was tested on RCAR Salvator-M3 board.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/arch/arm/tee/Kconfig  |   4 ++
 xen/arch/arm/tee/Makefile |   1 +
 xen/arch/arm/tee/optee.c  | 178 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 183 insertions(+)
 create mode 100644 xen/arch/arm/tee/optee.c

Comments

Julien Grall Oct. 16, 2017, 2:36 p.m. UTC | #1
Hi Volodymyr,

On 11/10/17 20:01, Volodymyr Babchuk wrote:
> Add basic OP-TEE mediator as an example how TEE mediator framework
> works.
> 
> Currently it support only calls from Dom0. Calls from other guests
> will be declined. It maps OP-TEE static shared memory region into
> Dom0 address space, so Dom0 is the only domain which can work with
> older versions of OP-TEE.
> 
> Also it alters SMC requests by\ adding domain id to request. OP-TEE
> can use this information to track requesters.
> 
> Albeit being in early development stages, this mediator already can
> be used on systems where only Dom0 interacts with OP-TEE.

A link to the spec would be useful here to be able to fully review this 
patch.

> 
> It was tested on RCAR Salvator-M3 board.

Is it with the stock op-tee? Or an updated version?

> 
> Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
> ---
>   xen/arch/arm/tee/Kconfig  |   4 ++
>   xen/arch/arm/tee/Makefile |   1 +
>   xen/arch/arm/tee/optee.c  | 178 ++++++++++++++++++++++++++++++++++++++++++++++
>   3 files changed, 183 insertions(+)
>   create mode 100644 xen/arch/arm/tee/optee.c
> 
> diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
> index e69de29..7c6b5c6 100644
> --- a/xen/arch/arm/tee/Kconfig
> +++ b/xen/arch/arm/tee/Kconfig
> @@ -0,0 +1,4 @@
> +config ARM_OPTEE
> +	bool "Enable OP-TEE mediator"
> +	default n
> +	depends on ARM_TEE
> diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
> index c54d479..9d93b42 100644
> --- a/xen/arch/arm/tee/Makefile
> +++ b/xen/arch/arm/tee/Makefile
> @@ -1 +1,2 @@
>   obj-y += tee.o
> +obj-$(CONFIG_ARM_OPTEE) += optee.o
> diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
> new file mode 100644
> index 0000000..0220691
> --- /dev/null
> +++ b/xen/arch/arm/tee/optee.c
> @@ -0,0 +1,178 @@
> +/*
> + * xen/arch/arm/tee/optee.c
> + *
> + * OP-TEE mediator
> + *
> + * Volodymyr Babchuk <volodymyr_babchuk@epam.com>
> + * Copyright (c) 2017 EPAM Systems.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +
> +#include <xen/types.h>
> +#include <xen/sched.h>
> +
> +#include <asm/p2m.h>
> +#include <asm/tee.h>
> +
> +#include "optee_msg.h"
> +#include "optee_smc.h"
> +
> +/*
> + * OP-TEE violates SMCCC when it defines own UID. So we need
> + * to place bytes in correct order.

Can you please point the paragraph in the spec where it says that?

> + */
> +#define OPTEE_UID  (xen_uuid_t){{                                               \
> +    (uint8_t)(OPTEE_MSG_UID_0 >>  0), (uint8_t)(OPTEE_MSG_UID_0 >>  8),         \
> +    (uint8_t)(OPTEE_MSG_UID_0 >> 16), (uint8_t)(OPTEE_MSG_UID_0 >> 24),         \
> +    (uint8_t)(OPTEE_MSG_UID_1 >>  0), (uint8_t)(OPTEE_MSG_UID_1 >>  8),         \
> +    (uint8_t)(OPTEE_MSG_UID_1 >> 16), (uint8_t)(OPTEE_MSG_UID_1 >> 24),         \
> +    (uint8_t)(OPTEE_MSG_UID_2 >>  0), (uint8_t)(OPTEE_MSG_UID_2 >>  8),         \
> +    (uint8_t)(OPTEE_MSG_UID_2 >> 16), (uint8_t)(OPTEE_MSG_UID_2 >> 24),         \
> +    (uint8_t)(OPTEE_MSG_UID_3 >>  0), (uint8_t)(OPTEE_MSG_UID_3 >>  8),         \
> +    (uint8_t)(OPTEE_MSG_UID_3 >> 16), (uint8_t)(OPTEE_MSG_UID_3 >> 24),         \
> +    }}
> +
> +static int optee_init(void)
> +{
> +    printk("OP-TEE mediator init done\n");
> +    return 0;
> +}
> +
> +static void optee_domain_create(struct domain *d)
> +{
> +    /*
> +     * Do nothing at this time.
> +     * In the future this function will notify that new VM is started.

You already have a new client with the hardware domain. So don't you 
already need to notify OP-TEE?

> +     */
> +}
> +
> +static void optee_domain_destroy(struct domain *d)
> +{
> +    /*
> +     * Do nothing at this time.
> +     * In the future this function will notify that VM is being destroyed.
> +     */

Same for the destruction?

> +}
> +
> +static bool forward_call(struct cpu_user_regs *regs)
> +{
> +    register_t resp[4];
> +
> +    call_smccc_smc(get_user_reg(regs, 0),
> +                   get_user_reg(regs, 1),
> +                   get_user_reg(regs, 2),
> +                   get_user_reg(regs, 3),
> +                   get_user_reg(regs, 4),
> +                   get_user_reg(regs, 5),
> +                   get_user_reg(regs, 6),
> +                   /* VM id 0 is reserved for hypervisor itself */

s/VM/client/. Also, on your design document you mentioned that you did 
modify OP-TEE to support multiple client ID. So how do you know whether 
the TEE supports client ID?

Similarly, do you expect OP-TEE to support 16-bit of client identifier?

> +                   current->domain->domain_id +1,
> +                   resp);
> +
> +    set_user_reg(regs, 0, resp[0]);
> +    set_user_reg(regs, 1, resp[1]);
> +    set_user_reg(regs, 2, resp[2]);
> +    set_user_reg(regs, 3, resp[3]);

Who will do the sanity check of the return values? Each callers? If so, 
I would prefer that the results are stored in a temporary array and a 
separate helpers will write them into the domain once the sanity is done.

This would avoid to mistakenly expose unwanted data to a domain.

> +
> +    return true;
> +}
> +
> +static bool handle_get_shm_config(struct cpu_user_regs *regs)
> +{
> +    paddr_t shm_start;
> +    size_t shm_size;
> +    int rc;
> +
> +    printk("handle_get_shm_config\n");

No plain printk in code accessible by the guest. You should use gprintk 
or ratelimit it.

> +    /* Give all static SHM region to the Dom0 */

s/Dom0/Hardware Domain/

But I am not sure what's the point of this check given OP-TEE is only 
supported for the Hardware Domain and you already have a check for that.

> +    if ( current->domain->domain_id != 0 )

Please use is_hardware_domain(current->domain) and not open-code the check.

> +        return false;
> +
> +    forward_call(regs);
> +
> +    /* Return error back to the guest */
> +    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
> +        return true;

This is quite confusing to read, I think it would make sense that 
forward_call return the error.

> +
> +    shm_start = get_user_reg(regs, 1);
> +    shm_size = get_user_reg(regs, 2);
> +
> +    /* Dom0 is mapped 1:1 */

Please don't make this assumption or at least add 
ASSERT(is_domain_direct_mapped(d));

> +    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),

Rather than using current->domain everywhere, I would prefer if you 
introduce a temporary variable for the domain.

> +                          shm_size / PAGE_SIZE,

Please PFN_DOWN(...).

> +                          maddr_to_mfn(shm_start),
> +                          p2m_ram_rw);

What is this shared memory for? I know this is the hardware domain, so 
using p2m_ram_rw would be ok. But I don't think this would be safe 
unless TEE do proper safety check.

> +    if ( rc < 0 )
> +    {
> +        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);

gprintk already dump the domid. So no need to say Dom0.

> +        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
> +    }
> +
> +    return true;
> +}
> +
> +static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
> +{
> +        forward_call(regs);
> +
> +        printk("handle_exchange_capabilities\n");

Same here, no plain printk.

> +        /* Return error back to the guest */
> +        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
> +            return true;
> +
> +        /* Don't allow guests to work without dynamic SHM */

Hmmm? But you don't support guests today. So why are you checking that?

I would prefer if you either support guest or not. But not half of it as 
it is hard to know how this will end up.

> +        if (current->domain->domain_id != 0 &&
> +            !(get_user_reg(regs, 1) & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) )
> +            set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);

Missing newline.

> +        return true;
> +}
> +
> +static bool optee_handle_smc(struct cpu_user_regs *regs)
> +{
> +    /* At this time we support only calls from Dom0. */
> +    if ( current->domain->domain_id != 0 )

is_hardware_domain(d)

> +        return false;
> +
> +    switch ( get_user_reg(regs, 0) )
> +    {
> +    case OPTEE_SMC_GET_SHM_CONFIG:
> +        return handle_get_shm_config(regs);
> +    case OPTEE_SMC_EXCHANGE_CAPABILITIES:
> +        return handle_exchange_capabilities(regs);
> +    default:
> +        return forward_call(regs);
> +    }
> +    return true;

The return here is not necessary.

> +}
> +
> +static void optee_remove(void)
> +{
> +}
> +
> +static const struct tee_mediator_ops optee_ops =
> +{
> +    .init = optee_init,
> +    .domain_create = optee_domain_create,
> +    .domain_destroy = optee_domain_destroy,
> +    .handle_smc = optee_handle_smc,
> +    .remove = optee_remove,
> +};
> +
> +REGISTER_TEE_MEDIATOR(optee, "OP-TEE", OPTEE_UID, &optee_ops);
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> 

Cheers,
Volodymyr Babchuk Oct. 17, 2017, 5:08 p.m. UTC | #2
On Mon, Oct 16, 2017 at 03:36:38PM +0100, Julien Grall wrote:
> Hi Volodymyr,
Hi Julien,

> On 11/10/17 20:01, Volodymyr Babchuk wrote:
> >Add basic OP-TEE mediator as an example how TEE mediator framework
> >works.
> >
> >Currently it support only calls from Dom0. Calls from other guests
> >will be declined. It maps OP-TEE static shared memory region into
> >Dom0 address space, so Dom0 is the only domain which can work with
> >older versions of OP-TEE.
> >
> >Also it alters SMC requests by\ adding domain id to request. OP-TEE
> >can use this information to track requesters.
> >
> >Albeit being in early development stages, this mediator already can
> >be used on systems where only Dom0 interacts with OP-TEE.
> 
> A link to the spec would be useful here to be able to fully review this
> patch.
Which spec? OP-TEE protocol? It was added in previous commit.

> >
> >It was tested on RCAR Salvator-M3 board.
> 
> Is it with the stock op-tee? Or an updated version?
Static SHM was tested with stock OP-TEE. Dynamic SHM was tested with
my build. But my patches are already merged. OP-TEE 2.6.0 will support
dynamic SHM out of the box.

> >
> >Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
> >---
> >  xen/arch/arm/tee/Kconfig  |   4 ++
> >  xen/arch/arm/tee/Makefile |   1 +
> >  xen/arch/arm/tee/optee.c  | 178 ++++++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 183 insertions(+)
> >  create mode 100644 xen/arch/arm/tee/optee.c
> >
> >diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
> >index e69de29..7c6b5c6 100644
> >--- a/xen/arch/arm/tee/Kconfig
> >+++ b/xen/arch/arm/tee/Kconfig
> >@@ -0,0 +1,4 @@
> >+config ARM_OPTEE
> >+	bool "Enable OP-TEE mediator"
> >+	default n
> >+	depends on ARM_TEE
> >diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
> >index c54d479..9d93b42 100644
> >--- a/xen/arch/arm/tee/Makefile
> >+++ b/xen/arch/arm/tee/Makefile
> >@@ -1 +1,2 @@
> >  obj-y += tee.o
> >+obj-$(CONFIG_ARM_OPTEE) += optee.o
> >diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
> >new file mode 100644
> >index 0000000..0220691
> >--- /dev/null
> >+++ b/xen/arch/arm/tee/optee.c
> >@@ -0,0 +1,178 @@
> >+/*
> >+ * xen/arch/arm/tee/optee.c
> >+ *
> >+ * OP-TEE mediator
> >+ *
> >+ * Volodymyr Babchuk <volodymyr_babchuk@epam.com>
> >+ * Copyright (c) 2017 EPAM Systems.
> >+ *
> >+ * This program is free software; you can redistribute it and/or modify
> >+ * it under the terms of the GNU General Public License version 2 as
> >+ * published by the Free Software Foundation.
> >+ *
> >+ * This program is distributed in the hope that it will be useful,
> >+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
> >+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> >+ * GNU General Public License for more details.
> >+ */
> >+
> >+#include <xen/types.h>
> >+#include <xen/sched.h>
> >+
> >+#include <asm/p2m.h>
> >+#include <asm/tee.h>
> >+
> >+#include "optee_msg.h"
> >+#include "optee_smc.h"
> >+
> >+/*
> >+ * OP-TEE violates SMCCC when it defines own UID. So we need
> >+ * to place bytes in correct order.
> 
> Can you please point the paragraph in the spec where it says that?
Sure.

> >+ */
> >+#define OPTEE_UID  (xen_uuid_t){{                                               \
> >+    (uint8_t)(OPTEE_MSG_UID_0 >>  0), (uint8_t)(OPTEE_MSG_UID_0 >>  8),         \
> >+    (uint8_t)(OPTEE_MSG_UID_0 >> 16), (uint8_t)(OPTEE_MSG_UID_0 >> 24),         \
> >+    (uint8_t)(OPTEE_MSG_UID_1 >>  0), (uint8_t)(OPTEE_MSG_UID_1 >>  8),         \
> >+    (uint8_t)(OPTEE_MSG_UID_1 >> 16), (uint8_t)(OPTEE_MSG_UID_1 >> 24),         \
> >+    (uint8_t)(OPTEE_MSG_UID_2 >>  0), (uint8_t)(OPTEE_MSG_UID_2 >>  8),         \
> >+    (uint8_t)(OPTEE_MSG_UID_2 >> 16), (uint8_t)(OPTEE_MSG_UID_2 >> 24),         \
> >+    (uint8_t)(OPTEE_MSG_UID_3 >>  0), (uint8_t)(OPTEE_MSG_UID_3 >>  8),         \
> >+    (uint8_t)(OPTEE_MSG_UID_3 >> 16), (uint8_t)(OPTEE_MSG_UID_3 >> 24),         \
> >+    }}
> >+
> >+static int optee_init(void)
> >+{
> >+    printk("OP-TEE mediator init done\n");
> >+    return 0;
> >+}
> >+
> >+static void optee_domain_create(struct domain *d)
> >+{
> >+    /*
> >+     * Do nothing at this time.
> >+     * In the future this function will notify that new VM is started.
> 
> You already have a new client with the hardware domain. So don't you already
> need to notifity OP-TEE?
Because currently OP-TEE does not support such notification.

> >+     */
> >+}
> >+
> >+static void optee_domain_destroy(struct domain *d)
> >+{
> >+    /*
> >+     * Do nothing at this time.
> >+     * In the future this function will notify that VM is being destroyed.
> >+     */
> 
> Same for the destruction?
The same answer. OP-TEE currently can work with only one domain. I selected
Dom0 for this.

> >+}
> >+
> >+static bool forward_call(struct cpu_user_regs *regs)
> >+{
> >+    register_t resp[4];
> >+
> >+    call_smccc_smc(get_user_reg(regs, 0),
> >+                   get_user_reg(regs, 1),
> >+                   get_user_reg(regs, 2),
> >+                   get_user_reg(regs, 3),
> >+                   get_user_reg(regs, 4),
> >+                   get_user_reg(regs, 5),
> >+                   get_user_reg(regs, 6),
> >+                   /* VM id 0 is reserved for hypervisor itself */
> 
> s/VM/client/. Also, on your design document you mentioned that you did
> modify OP-TEE to support multiple client ID. So how do you know whether the
> TEE supports client ID?
Hm, as I remember, I never mentioned that I modified OP-TEE to support
multiple client IDs. This is my current task.
But, answering your question, optee_domain_create() will tell OP-TEE about
new client ID.

> Similarly, do you expect OP-TEE to support 16-bit of client identifier?
Actually, yes. But I don't expect it to work with all 65535 domains at the
same time.

> >+                   current->domain->domain_id +1,
> >+                   resp);
> >+
> >+    set_user_reg(regs, 0, resp[0]);
> >+    set_user_reg(regs, 1, resp[1]);
> >+    set_user_reg(regs, 2, resp[2]);
> >+    set_user_reg(regs, 3, resp[3]);
> 
> Who will do the sanity check of the return values? Each callers? If so, I
> would prefer that the results are stored in a temporary array and a separate
> helpers will write them into the domain once the sanity is done.
Maybe there will be cases when call will be forwarded straight to OP-TEE and
nobody in hypervisor will examine returned result. At least, at this moment
there are such cases. Probably, in a full-scale mediator this will no longer
be true.

> This would avoid to mistakenly expose unwanted data to a domain.
Correct me, but set_user_reg() modifies data that will be stored in general
purpose registers during return from trap handler. This can't expose any
additional data to a domain.

> >+
> >+    return true;
> >+}
> >+
> >+static bool handle_get_shm_config(struct cpu_user_regs *regs)
> >+{
> >+    paddr_t shm_start;
> >+    size_t shm_size;
> >+    int rc;
> >+
> >+    printk("handle_get_shm_config\n");
> 
> No plain printk in code accessible by the guest. You should use gprintk or
> ratelimit it.
Sorry, this is a debug print. I'll remove it entirely.

> >+    /* Give all static SHM region to the Dom0 */
> 
> s/Dom0/Hardware Domain/
Hm, looks like Dom0 != hardware domain. At least I see code that replaces
contents of hardware_domain variable. If it is possible, then there will
be a problem with static SHM buffer.

Looks like it is better to check for is_domain_direct_mapped(d), as you
mentioned below.

> But I am not sure what's the point of this check given OP-TEE is only
> supported for the Hardware Domain and you already have a check for that.
Because I will remove outer check. But this check will remain. In this way
older OP-TEEs (without virtualization support) will still be accessible
from Dom0/HWDom.

> >+    if ( current->domain->domain_id != 0 )
> 
> Please use is_hardware_domain(current->domain) and not open-code the check.
> 
> >+        return false;
> >+
> >+    forward_call(regs);
> >+
> >+    /* Return error back to the guest */
> >+    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
> >+        return true;
> 
> This is quite confusing to read, I think it would make sense that
> forward_call return the error.
Good idea, thanks.

> >+
> >+    shm_start = get_user_reg(regs, 1);
> >+    shm_size = get_user_reg(regs, 2);
> >+
> >+    /* Dom0 is mapped 1:1 */
> 
> Please don't make this assumption or at least add
> ASSERT(is_domain_direct_mapped(d));
Thanks. I'll check this in runtime, as I mentioned above.

> >+    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),
> 
> Rather than using current->domain everywhere, I would prefer if you
> introduce a temporary variable for the domain.
Okay.

> >+                          shm_size / PAGE_SIZE,
> 
> Please PFN_DOWN(...).
> 
> >+                          maddr_to_mfn(shm_start),
> >+                          p2m_ram_rw);
> 
> What is this shared memory for? I know this is the hardware domain, so using
> p2m_ram_rw would be ok. But I don't think this would be safe unless TEE do
> proper safety check.
Linux kernel driver does memremap() in such place. OP-TEE maps it as non-secure
RAM. This shared memory is used to pass information between OP-TEE OS
and OP-TEE client. Which safety check are you talking about?

> 
> >+    if ( rc < 0 )
> >+    {
> >+        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
> 
> gprintk already dump the domid. So no need to say Dom0.
I just wanted to emphasize that we mapped memory for Dom0. Will remove.

> >+        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
> >+    }
> >+
> >+    return true;
> >+}
> >+
> >+static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
> >+{
> >+        forward_call(regs);
> >+
> >+        printk("handle_exchange_capabilities\n");
> 
> Same here, no plain prink.
Sorry, this is another debug print. Missed it when formatting patches.

> >+        /* Return error back to the guest */
> >+        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
> >+            return true;
> >+
> >+        /* Don't allow guests to work without dynamic SHM */
> 
> Hmmm? But you don't support guests today. So why are you checking that?
This is an RFC. Will remove these parts of the code in a proper patch series.

I just wanted to ensure that community is okay with proposed approach and
to show how minimalistic mediator can look.
If you are generally okay with that, I'll address all comments, rework
second patch and push proper patch series.

WBR, Volodymyr
Julien Grall Oct. 17, 2017, 5:30 p.m. UTC | #3
On 17/10/17 18:08, Volodymyr Babchuk wrote:
> On Mon, Oct 16, 2017 at 03:36:38PM +0100, Julien Grall wrote:
>> Hi Volodymyr,
> Hi Julien,
> 
>> On 11/10/17 20:01, Volodymyr Babchuk wrote:
>>> Add basic OP-TEE mediator as an example how TEE mediator framework
>>> works.
>>>
>>> Currently it support only calls from Dom0. Calls from other guests
>>> will be declined. It maps OP-TEE static shared memory region into
>>> Dom0 address space, so Dom0 is the only domain which can work with
>>> older versions of OP-TEE.
>>>
>>> Also it alters SMC requests by\ adding domain id to request. OP-TEE
>>> can use this information to track requesters.
>>>
>>> Albeit being in early development stages, this mediator already can
>>> be used on systems where only Dom0 interacts with OP-TEE.
>>
>> A link to the spec would be useful here to be able to fully review this
>> patch.
> Which spec? OP-TEE protocol? It was added in previous commit.

So basically you are saying the header is the documentation of the API? 
Is there no external documentation making it easier to follow the versioning...?

> 
>>>
>>> It was tested on RCAR Salvator-M3 board.
>>
>> Is it with the stock op-tee? Or an updated version?
> Static SHM was tested with stock OP-TEE. Dynamic SHM was tested with
> my build. But my patches are already merged. OP-TEE 2.6.0 will support
> dynamic SHM out of the box.
> 
>>>
>>> Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
>>> ---
>>>   xen/arch/arm/tee/Kconfig  |   4 ++
>>>   xen/arch/arm/tee/Makefile |   1 +
>>>   xen/arch/arm/tee/optee.c  | 178 ++++++++++++++++++++++++++++++++++++++++++++++
>>>   3 files changed, 183 insertions(+)
>>>   create mode 100644 xen/arch/arm/tee/optee.c
>>>
>>> diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
>>> index e69de29..7c6b5c6 100644
>>> --- a/xen/arch/arm/tee/Kconfig
>>> +++ b/xen/arch/arm/tee/Kconfig
>>> @@ -0,0 +1,4 @@
>>> +config ARM_OPTEE
>>> +	bool "Enable OP-TEE mediator"
>>> +	default n
>>> +	depends on ARM_TEE
>>> diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
>>> index c54d479..9d93b42 100644
>>> --- a/xen/arch/arm/tee/Makefile
>>> +++ b/xen/arch/arm/tee/Makefile
>>> @@ -1 +1,2 @@
>>>   obj-y += tee.o
>>> +obj-$(CONFIG_ARM_OPTEE) += optee.o
>>> diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
>>> new file mode 100644
>>> index 0000000..0220691
>>> --- /dev/null
>>> +++ b/xen/arch/arm/tee/optee.c
>>> @@ -0,0 +1,178 @@
>>> +/*
>>> + * xen/arch/arm/tee/optee.c
>>> + *
>>> + * OP-TEE mediator
>>> + *
>>> + * Volodymyr Babchuk <volodymyr_babchuk@epam.com>
>>> + * Copyright (c) 2017 EPAM Systems.
>>> + *
>>> + * This program is free software; you can redistribute it and/or modify
>>> + * it under the terms of the GNU General Public License version 2 as
>>> + * published by the Free Software Foundation.
>>> + *
>>> + * This program is distributed in the hope that it will be useful,
>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>> + * GNU General Public License for more details.
>>> + */
>>> +
>>> +#include <xen/types.h>
>>> +#include <xen/sched.h>
>>> +
>>> +#include <asm/p2m.h>
>>> +#include <asm/tee.h>
>>> +
>>> +#include "optee_msg.h"
>>> +#include "optee_smc.h"
>>> +
>>> +/*
>>> + * OP-TEE violates SMCCC when it defines own UID. So we need
>>> + * to place bytes in correct order.
>>
>> Can you please point the paragraph in the spec where it says that?
> Sure.
> 
>>> + */
>>> +#define OPTEE_UID  (xen_uuid_t){{                                               \
>>> +    (uint8_t)(OPTEE_MSG_UID_0 >>  0), (uint8_t)(OPTEE_MSG_UID_0 >>  8),         \
>>> +    (uint8_t)(OPTEE_MSG_UID_0 >> 16), (uint8_t)(OPTEE_MSG_UID_0 >> 24),         \
>>> +    (uint8_t)(OPTEE_MSG_UID_1 >>  0), (uint8_t)(OPTEE_MSG_UID_1 >>  8),         \
>>> +    (uint8_t)(OPTEE_MSG_UID_1 >> 16), (uint8_t)(OPTEE_MSG_UID_1 >> 24),         \
>>> +    (uint8_t)(OPTEE_MSG_UID_2 >>  0), (uint8_t)(OPTEE_MSG_UID_2 >>  8),         \
>>> +    (uint8_t)(OPTEE_MSG_UID_2 >> 16), (uint8_t)(OPTEE_MSG_UID_2 >> 24),         \
>>> +    (uint8_t)(OPTEE_MSG_UID_3 >>  0), (uint8_t)(OPTEE_MSG_UID_3 >>  8),         \
>>> +    (uint8_t)(OPTEE_MSG_UID_3 >> 16), (uint8_t)(OPTEE_MSG_UID_3 >> 24),         \
>>> +    }}
>>> +
>>> +static int optee_init(void)
>>> +{
>>> +    printk("OP-TEE mediator init done\n");
>>> +    return 0;
>>> +}
>>> +
>>> +static void optee_domain_create(struct domain *d)
>>> +{
>>> +    /*
>>> +     * Do nothing at this time.
>>> +     * In the future this function will notify that new VM is started.
>>
>> You already have a new client with the hardware domain. So don't you already
>> need to notifity OP-TEE?
> Because currently OP-TEE does not support such notification.
> 
>>> +     */
>>> +}
>>> +
>>> +static void optee_domain_destroy(struct domain *d)
>>> +{
>>> +    /*
>>> +     * Do nothing at this time.
>>> +     * In the future this function will notify that VM is being destroyed.
>>> +     */
>>
>> Same for the destruction?
> The same answer. OP-TEE currently can work with only one domain. I selected
> Dom0 for this.
> 
>>> +}
>>> +
>>> +static bool forward_call(struct cpu_user_regs *regs)
>>> +{
>>> +    register_t resp[4];
>>> +
>>> +    call_smccc_smc(get_user_reg(regs, 0),
>>> +                   get_user_reg(regs, 1),
>>> +                   get_user_reg(regs, 2),
>>> +                   get_user_reg(regs, 3),
>>> +                   get_user_reg(regs, 4),
>>> +                   get_user_reg(regs, 5),
>>> +                   get_user_reg(regs, 6),
>>> +                   /* VM id 0 is reserved for hypervisor itself */
>>
>> s/VM/client/. Also, on your design document you mentioned that you did
>> modify OP-TEE to support multiple client ID. So how do you know whether the
>> TEE supports client ID?
> Hm, as I remember, I never mentioned that I modified OP-TEE to support
> multiple client IDs. This is my current task.

"On OP-TEE side:
1. Shared memory redesign which is almost complete.
2. Implement new SMCs  from hypervsiror to TEE to track VM lifecycle.
3. Track VM IDs to isolated VM data.
4. RPCs to sleeping guests."

I was kind of expecting that was done given you put a client ID here. If 
it is not done, then why are you passing a ID that we are not even sure 
OP-TEE will be able to understand?

> But, answering your question, optee_domain_create() will tell OP-TEE about
> new client ID.

And I guess you would expect that the registration may fail, hence an 
error return is mandatory in optee_domain_create.

> 
>> Similarly, do you expect OP-TEE to support 16-bit of client identifier?
> Actually, yes. But I don't expect it to work with all 65535 domains at the
> same time.

So how many clients do you expect to be supported? And how do you expect 
to get that information back?

> 
>>> +                   current->domain->domain_id +1,
>>> +                   resp);
>>> +
>>> +    set_user_reg(regs, 0, resp[0]);
>>> +    set_user_reg(regs, 1, resp[1]);
>>> +    set_user_reg(regs, 2, resp[2]);
>>> +    set_user_reg(regs, 3, resp[3]);
>>
>> Who will do the sanity check of the return values? Each callers? If so, I
>> would prefer that the results are stored in a temporary array and a separate
>> helpers will write them into the domain once the sanity is done.
> Maybe there will be cases when call will be forwarded straight to OP-TEE and
> nobody in hypervisor will examine returned result. At least, at this moment
> there are such cases. Probably, in full-scalle mediator this will no longer
> be true.
> 
>> This would avoid to mistakenly expose unwanted data to a domain.
> Correct me, but set_user_reg() modifies data that will be stored in general
> purpose registers during return from trap handler. This can't expose any
> additional data to a domain.

Which set_user_reg()? The helper does not do any modification... If you 
speak about the code below, then it is very confusing and error-prone.

If you separate the call from setting the guest registers, then you give 
a hint to the caller that maybe something has to be done and they can't 
blindly trust the result...

> 
>>> +
>>> +    return true;
>>> +}
>>> +
>>> +static bool handle_get_shm_config(struct cpu_user_regs *regs)
>>> +{
>>> +    paddr_t shm_start;
>>> +    size_t shm_size;
>>> +    int rc;
>>> +
>>> +    printk("handle_get_shm_config\n");
>>
>> No plain printk in code accessible by the guest. You should use gprintk or
>> ratelimit it.
> Sorry, this is a debug print. I'll remove it at all.
> 
>>> +    /* Give all static SHM region to the Dom0 */
>>
>> s/Dom0/Hardware Domain/
> Hm, looks like Dom0 != hardware domain. At least I see code that replaces
> contents of hardware_domain variable. If it is possible, then there will
> be a problem with static SHM buffer.

On Arm Dom0 == Hardware Domain. If Hardware Domain were introduced, then 
I would expect OP-TEE to be handled by it and not Dom0.

> 
> Looks like it is better to check for is_domain_direct_mapped(d), as you
> mentioned below.

is_domain_direct_mapped(d) != hwdom. Please don't mix the two. The 
former is here to protect you when gfn == mfn. The latter is here to make 
sure no other domain than the hardware domain is going to use the shared 
memory.

> 
>> But I am not sure what's the point of this check given OP-TEE is only
>> supported for the Hardware Domain and you already have a check for that.
> Because I will remove outer check. But this check will remain. In this way
> older OP-TEEs (without virtualization support) will still be accessible
> from Dom0/HWDom.
> 
>>> +    if ( current->domain->domain_id != 0 )
>>
>> Please use is_hardware_domain(current->domain) and not open-code the check.
>>
>>> +        return false;
>>> +
>>> +    forward_call(regs);
>>> +
>>> +    /* Return error back to the guest */
>>> +    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>> +        return true;
>>
>> This is quite confusing to read, I think it would make sense that
>> forward_call return the error.
> Good idea, thanks.
> 
>>> +
>>> +    shm_start = get_user_reg(regs, 1);
>>> +    shm_size = get_user_reg(regs, 2);
>>> +
>>> +    /* Dom0 is mapped 1:1 */
>>
>> Please don't make this assumption or at least add
>> ASSERT(is_domain_direct_mapped(d));
> Thanks. I'll check this in runtime, as I mentioned above.
> 
>>> +    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),
>>
>> Rather than using current->domain everywhere, I would prefer if you
>> introduce a temporary variable for the domain.
> Okay.
> 
>>> +                          shm_size / PAGE_SIZE,
>>
>> Please PFN_DOWN(...).
>>
>>> +                          maddr_to_mfn(shm_start),
>>> +                          p2m_ram_rw);
>>
>> What is this shared memory for? I know this is the hardware domain, so using
>> p2m_ram_rw would be ok. But I don't think this would be safe unless TEE do
>> proper safety check.
> Linux kernel driver does memremap() in such place. OP-TEE maps it as non-secure
> RAM. This shared memory is used to pass information between OP-TEE OS
> and OP-TEE client. About which safety check you are talking?

Well, does OP-TEE validate the data read from that shared region? But it 
seems that you don't plan to give part of the SHM to a guest, so it 
might be ok.

Also how OP-TEE will map this region? Cacheable...?

> 
>>
>>> +    if ( rc < 0 )
>>> +    {
>>> +        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
>>
>> gprintk already dump the domid. So no need to say Dom0.
> I just wanted to emphasis that we mappaed memory for Dom0. Will remove.

gprintk will printk d0. So there are no point to say it a second time...

> 
>>> +        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
>>> +    }
>>> +
>>> +    return true;
>>> +}
>>> +
>>> +static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
>>> +{
>>> +        forward_call(regs);
>>> +
>>> +        printk("handle_exchange_capabilities\n");
>>
>> Same here, no plain prink.
> Sorry, this is another debug print. Missed it when formatted patches.
> 
>>> +        /* Return error back to the guest */
>>> +        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>> +            return true;
>>> +
>>> +        /* Don't allow guests to work without dynamic SHM */
>>
>> Hmmm? But you don't support guests today. So why are you checking that?
> This is a RFC. Will remove this parts of the code in a proper patch series.
> 
> I just wanted to ensure that community is okay with proposed approach and
> to show how minimalistic mediator can look.

I don't think this is true. You only show how easy it is to let Dom0 
accessing TEE. And as I said in the cover letter, this is not the 
controversial part.

The more controversial one is the guest support that you completely left 
aside. I believe this part will not be as minimalistic as you think 
because you need to translate buffer address and prevent those buffers 
to disappear under your feet.

There are probably other problem to fix...

> If you are generally okay with that, I'll address all comments, rework
> second patch and push proper patch series.

Cheers,
Volodymyr Babchuk Oct. 17, 2017, 6:57 p.m. UTC | #4
On Tue, Oct 17, 2017 at 06:30:13PM +0100, Julien Grall wrote:

> >>On 11/10/17 20:01, Volodymyr Babchuk wrote:
> >>>Add basic OP-TEE mediator as an example how TEE mediator framework
> >>>works.
> >>>
> >>>Currently it support only calls from Dom0. Calls from other guests
> >>>will be declined. It maps OP-TEE static shared memory region into
> >>>Dom0 address space, so Dom0 is the only domain which can work with
> >>>older versions of OP-TEE.
> >>>
> >>>Also it alters SMC requests by\ adding domain id to request. OP-TEE
> >>>can use this information to track requesters.
> >>>
> >>>Albeit being in early development stages, this mediator already can
> >>>be used on systems where only Dom0 interacts with OP-TEE.
> >>
> >>A link to the spec would be useful here to be able to fully review this
> >>patch.
> >Which spec? OP-TEE protocol? It was added in previous commit.
> 
> So basically you are saying the header is the documentation of the API?
> There are not external documentation making easier to follow the version...?
There are high-level documentation at [1]. All details are covered in headers.

> >
> >>>
> >>>It was tested on RCAR Salvator-M3 board.
> >>
> >>Is it with the stock op-tee? Or an updated version?
> >Static SHM was tested with stock OP-TEE. Dynamic SHM was tested with
> >my build. But my patches are already merged. OP-TEE 2.6.0 will support
> >dynamic SHM out of the box.
> >
> >>>
> >>>Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
> >>>---
> >>>  xen/arch/arm/tee/Kconfig  |   4 ++
> >>>  xen/arch/arm/tee/Makefile |   1 +
> >>>  xen/arch/arm/tee/optee.c  | 178 ++++++++++++++++++++++++++++++++++++++++++++++
> >>>  3 files changed, 183 insertions(+)
> >>>  create mode 100644 xen/arch/arm/tee/optee.c
> >>>
> >>>diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
> >>>index e69de29..7c6b5c6 100644
> >>>--- a/xen/arch/arm/tee/Kconfig
> >>>+++ b/xen/arch/arm/tee/Kconfig
> >>>@@ -0,0 +1,4 @@
> >>>+config ARM_OPTEE
> >>>+	bool "Enable OP-TEE mediator"
> >>>+	default n
> >>>+	depends on ARM_TEE
> >>>diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
> >>>index c54d479..9d93b42 100644
> >>>--- a/xen/arch/arm/tee/Makefile
> >>>+++ b/xen/arch/arm/tee/Makefile
> >>>@@ -1 +1,2 @@
> >>>  obj-y += tee.o
> >>>+obj-$(CONFIG_ARM_OPTEE) += optee.o
> >>>diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
> >>>new file mode 100644
> >>>index 0000000..0220691
> >>>--- /dev/null
> >>>+++ b/xen/arch/arm/tee/optee.c
> >>>@@ -0,0 +1,178 @@
> >>>+/*
> >>>+ * xen/arch/arm/tee/optee.c
> >>>+ *
> >>>+ * OP-TEE mediator
> >>>+ *
> >>>+ * Volodymyr Babchuk <volodymyr_babchuk@epam.com>
> >>>+ * Copyright (c) 2017 EPAM Systems.
> >>>+ *
> >>>+ * This program is free software; you can redistribute it and/or modify
> >>>+ * it under the terms of the GNU General Public License version 2 as
> >>>+ * published by the Free Software Foundation.
> >>>+ *
> >>>+ * This program is distributed in the hope that it will be useful,
> >>>+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
> >>>+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> >>>+ * GNU General Public License for more details.
> >>>+ */
> >>>+
> >>>+#include <xen/types.h>
> >>>+#include <xen/sched.h>
> >>>+
> >>>+#include <asm/p2m.h>
> >>>+#include <asm/tee.h>
> >>>+
> >>>+#include "optee_msg.h"
> >>>+#include "optee_smc.h"
> >>>+
> >>>+/*
> >>>+ * OP-TEE violates SMCCC when it defines own UID. So we need
> >>>+ * to place bytes in correct order.
> >>
> >>Can you please point the paragraph in the spec where it says that?
> >Sure.
> >
> >>>+ */
> >>>+#define OPTEE_UID  (xen_uuid_t){{                                               \
> >>>+    (uint8_t)(OPTEE_MSG_UID_0 >>  0), (uint8_t)(OPTEE_MSG_UID_0 >>  8),         \
> >>>+    (uint8_t)(OPTEE_MSG_UID_0 >> 16), (uint8_t)(OPTEE_MSG_UID_0 >> 24),         \
> >>>+    (uint8_t)(OPTEE_MSG_UID_1 >>  0), (uint8_t)(OPTEE_MSG_UID_1 >>  8),         \
> >>>+    (uint8_t)(OPTEE_MSG_UID_1 >> 16), (uint8_t)(OPTEE_MSG_UID_1 >> 24),         \
> >>>+    (uint8_t)(OPTEE_MSG_UID_2 >>  0), (uint8_t)(OPTEE_MSG_UID_2 >>  8),         \
> >>>+    (uint8_t)(OPTEE_MSG_UID_2 >> 16), (uint8_t)(OPTEE_MSG_UID_2 >> 24),         \
> >>>+    (uint8_t)(OPTEE_MSG_UID_3 >>  0), (uint8_t)(OPTEE_MSG_UID_3 >>  8),         \
> >>>+    (uint8_t)(OPTEE_MSG_UID_3 >> 16), (uint8_t)(OPTEE_MSG_UID_3 >> 24),         \
> >>>+    }}
> >>>+
> >>>+static int optee_init(void)
> >>>+{
> >>>+    printk("OP-TEE mediator init done\n");
> >>>+    return 0;
> >>>+}
> >>>+
> >>>+static void optee_domain_create(struct domain *d)
> >>>+{
> >>>+    /*
> >>>+     * Do nothing at this time.
> >>>+     * In the future this function will notify that new VM is started.
> >>
> >>You already have a new client with the hardware domain. So don't you already
> >>need to notifity OP-TEE?
> >Because currently OP-TEE does not support such notification.
> >
> >>>+     */
> >>>+}
> >>>+
> >>>+static void optee_domain_destroy(struct domain *d)
> >>>+{
> >>>+    /*
> >>>+     * Do nothing at this time.
> >>>+     * In the future this function will notify that VM is being destroyed.
> >>>+     */
> >>
> >>Same for the destruction?
> >The same answer. OP-TEE currently can work with only one domain. I selected
> >Dom0 for this.
> >
> >>>+}
> >>>+
> >>>+static bool forward_call(struct cpu_user_regs *regs)
> >>>+{
> >>>+    register_t resp[4];
> >>>+
> >>>+    call_smccc_smc(get_user_reg(regs, 0),
> >>>+                   get_user_reg(regs, 1),
> >>>+                   get_user_reg(regs, 2),
> >>>+                   get_user_reg(regs, 3),
> >>>+                   get_user_reg(regs, 4),
> >>>+                   get_user_reg(regs, 5),
> >>>+                   get_user_reg(regs, 6),
> >>>+                   /* VM id 0 is reserved for hypervisor itself */
> >>
> >>s/VM/client/. Also, on your design document you mentioned that you did
> >>modify OP-TEE to support multiple client ID. So how do you know whether the
> >>TEE supports client ID?
> >Hm, as I remember, I never mentioned that I modified OP-TEE to support
> >multiple client IDs. This is my current task.
> 
> "On OP-TEE side:
> 1. Shared memory redesign which is almost complete.
> 2. Implement new SMCs  from hypervsiror to TEE to track VM lifecycle.
> 3. Track VM IDs to isolated VM data.
> 4. RPCs to sleeping guests."
Yes, these are my plans. The first item is done. I'm currently working on
the others. Sorry, it looks like I didn't clearly show that this is what
should be done. It is not done yet.

> I was kind of expecting that was done given you put a client ID here. If it
> is not done, then why are you passing a ID that we are not even sure OP-TEE
> will be able to understand?
OP-TEE has very rudimentary support for client IDs [2], so it will understand it.
It uses the client ID to ensure that the right VM returns from an RPC. There are
no other uses for it right now.

> >But, answering your question, optee_domain_create() will tell OP-TEE about
> >new client ID.
> 
> And I guess you would expect that the registration may fail, hence an error
> return is mandatory in optee_domain_create.
Agree.

> >
> >>Similarly, do you expect OP-TEE to support 16-bit of client identifier?
> >Actually, yes. But I don't expect it to work with all 65535 domains at the
> >same time.
> 
> So how many client do you expect to be supported? And how do you expect to
> get that information back?
I can't answer your question right now. OP-TEE has limited memory
resources. It needs to share them among multiple guests. This is not
done yet, so I can't make assumptions yet.

> >
> >>>+                   current->domain->domain_id +1,
> >>>+                   resp);
> >>>+
> >>>+    set_user_reg(regs, 0, resp[0]);
> >>>+    set_user_reg(regs, 1, resp[1]);
> >>>+    set_user_reg(regs, 2, resp[2]);
> >>>+    set_user_reg(regs, 3, resp[3]);
> >>
> >>Who will do the sanity check of the return values? Each callers? If so, I
> >>would prefer that the results are stored in a temporary array and a separate
> >>helpers will write them into the domain once the sanity is done.
> >Maybe there will be cases when call will be forwarded straight to OP-TEE and
> >nobody in hypervisor will examine returned result. At least, at this moment
> >there are such cases. Probably, in full-scalle mediator this will no longer
> >be true.
> >
> >>This would avoid to mistakenly expose unwanted data to a domain.
> >Correct me, but set_user_reg() modifies data that will be stored in general
> >purpose registers during return from trap handler. This can't expose any
> >additional data to a domain.
> 
> Which set_user_reg()? The helper does not do any modification... If you
> speak about the code below, then it is very confusing and error-prone.
No, I was speaking about the code above — the one that calls set_user_reg().
You left your comment there, so I assumed you were talking about that part.

> If you separate the call from setting the guest registers then the you give
> a hint to the caller that maybe something has to be down and he can't
> blindly trust the result...
Let me describe how this works right now. XEN traps SMC to OP-TEE and forwards
it to the mediator. Mediator examines registers to determine type of the call.
Then it either:

 * Forwards it to OP-TEE as is. This does forward_call(). forward_call()
   executes real SMC and then writes return data to guest registers

 * Forwards it to OP-TEE and then examines result. Again, it uses
   forward_call() to execute SMC and then it checks returned values.

   For example, if the guest wants to exchange capabilities with OP-TEE,
   the mediator checks whether OP-TEE supports dynamic SHM. If it is not
   supported and the guest is not hwdom, then the mediator injects an error
   into the guest. This prevents further initialization of the OP-TEE
   driver in the client.

   Another example is static SHM configuration. If this request was sent
   from hwdom, then upon return from OP-TEE, mediator maps static
   shm into hwdom address space. Else it returns an error. DomU sees
   that static SHM is not available and relies only on dynamic SHM.

Idea is to make this transparent for OP-TEE client driver. It does not
need to know that it is running in virtualized environment (one
backward-compatible change will be needed anyways, but this is
another story).

Static SHM is a predefined region, that is shared by OP-TEE OS and OP-TEE client.
Both sides expect that it is a physically contiguous memory region.
Dynamic SHM is a newer thing. It allows the OP-TEE client to use any portion
of its own memory as SHM. It was designed with virtualization in mind, so it
supports non-contiguous memory regions. Thus it can be used by DomU clients.
> 
> >
> >>>+
> >>>+    return true;
> >>>+}
> >>>+
> >>>+static bool handle_get_shm_config(struct cpu_user_regs *regs)
> >>>+{
> >>>+    paddr_t shm_start;
> >>>+    size_t shm_size;
> >>>+    int rc;
> >>>+
> >>>+    printk("handle_get_shm_config\n");
> >>
> >>No plain printk in code accessible by the guest. You should use gprintk or
> >>ratelimit it.
> >Sorry, this is a debug print. I'll remove it at all.
> >
> >>>+    /* Give all static SHM region to the Dom0 */
> >>
> >>s/Dom0/Hardware Domain/
> >Hm, looks like Dom0 != hardware domain. At least I see code that replaces
> >contents of hardware_domain variable. If it is possible, then there will
> >be a problem with static SHM buffer.
> 
> On Arm Dom0 == Hardware Domain. If Hardware Domain were introduced, then I
> would expect OP-TEE to be handled by the it and not Dom0.
Oh, I see. Thank you for explanation.

> >
> >Looks like it is better to check for is_domain_direct_mapped(d), as you
> >mentioned below.
> 
> is_domain_direct_mapped(d) != hwdom. Please don't mix the both. The former
> is here to proctect you gfn == mfn. The latter is here to make sure no other
> domain than the hardware domain is going to use the shared memory.
Yes, I see. As I said earlier, only a 1:1 mapped domain can use the static
SHM mechanism. So I think I need to use is_domain_direct_mapped(d).

> >
> >>But I am not sure what's the point of this check given OP-TEE is only
> >>supported for the Hardware Domain and you already have a check for that.
> >Because I will remove outer check. But this check will remain. In this way
> >older OP-TEEs (without virtualization support) will still be accessible
> >from Dom0/HWDom.
> >
> >>>+    if ( current->domain->domain_id != 0 )
> >>
> >>Please use is_hardware_domain(current->domain) and not open-code the check.
> >>
> >>>+        return false;
> >>>+
> >>>+    forward_call(regs);
> >>>+
> >>>+    /* Return error back to the guest */
> >>>+    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
> >>>+        return true;
> >>
> >>This is quite confusing to read, I think it would make sense that
> >>forward_call return the error.
> >Good idea, thanks.
> >
> >>>+
> >>>+    shm_start = get_user_reg(regs, 1);
> >>>+    shm_size = get_user_reg(regs, 2);
> >>>+
> >>>+    /* Dom0 is mapped 1:1 */
> >>
> >>Please don't make this assumption or at least add
> >>ASSERT(is_domain_direct_mapped(d));
> >Thanks. I'll check this in runtime, as I mentioned above.
> >
> >>>+    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),
> >>
> >>Rather than using current->domain everywhere, I would prefer if you
> >>introduce a temporary variable for the domain.
> >Okay.
> >
> >>>+                          shm_size / PAGE_SIZE,
> >>
> >>Please PFN_DOWN(...).
> >>
> >>>+                          maddr_to_mfn(shm_start),
> >>>+                          p2m_ram_rw);
> >>
> >>What is this shared memory for? I know this is the hardware domain, so using
> >>p2m_ram_rw would be ok. But I don't think this would be safe unless TEE do
> >>proper safety check.
> >Linux kernel driver does memremap() in such place. OP-TEE maps it as non-secure
> >RAM. This shared memory is used to pass information between OP-TEE OS
> >and OP-TEE client. About which safety check you are talking?
> 
> Well, does OP-TEE validate the data read from that shared region? But it
> seems that you don't plan to give part of the SHM to a guest, so it might be
> ok.
OP-TEE surely validates all data from the NW. Also, OP-TEE is written in such
a way that it reads from shared memory only once, to ensure that the NW will
not change data after validation. The mediator will do the same.

> Also how OP-TEE will map this region? Cacheable...?
Yes, cacheable, PR, PW, non-secure.

> >
> >>
> >>>+    if ( rc < 0 )
> >>>+    {
> >>>+        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
> >>
> >>gprintk already dump the domid. So no need to say Dom0.
> >I just wanted to emphasis that we mappaed memory for Dom0. Will remove.
> 
> gprintk will printk d0. So there are no point to say it a second time...
> >
> >>>+        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
> >>>+    }
> >>>+
> >>>+    return true;
> >>>+}
> >>>+
> >>>+static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
> >>>+{
> >>>+        forward_call(regs);
> >>>+
> >>>+        printk("handle_exchange_capabilities\n");
> >>
> >>Same here, no plain prink.
> >Sorry, this is another debug print. Missed it when formatted patches.
> >
> >>>+        /* Return error back to the guest */
> >>>+        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
> >>>+            return true;
> >>>+
> >>>+        /* Don't allow guests to work without dynamic SHM */
> >>
> >>Hmmm? But you don't support guests today. So why are you checking that?
> >This is a RFC. Will remove this parts of the code in a proper patch series.
> >
> >I just wanted to ensure that community is okay with proposed approach and
> >to show how minimalistic mediator can look.
> I don't think this is true. You only show how easy it is to let Dom0
> accessing TEE. And as I said in the cover letter, this is not the
> controversial part.
Actually I wanted to show approach when mediator resides right in xen.
I got valuable input from you. Now I see that I must completely rework the
first patch. And, probably, show more comprehensive support from OP-TEE side.

> The more controversial one is the guest support that you completely left
> aside. I believe this part will not be as minimalistic as you think because
> you need to translate buffer address and prevent those buffers to disappear
> under your feet.
Yes. I plan to copy all buffers where IPAs are presented to another place,
so DomU will not be able to see PAs during translation. And I plan to
pin all DomU pages with data. Also, I'll read from guest pages only
once. I think this will be enough.

> There are probably other problem to fix...
Probably yes...

I think I'll focus on the OP-TEE side right now and come back when there
will be more to show.

[1] https://github.com/OP-TEE/optee_os/blob/master/documentation/optee_design.md
[2] https://github.com/OP-TEE/optee_os/blob/master/core/arch/arm/kernel/thread.c#L459


WBR, Volodymyr.
Julien Grall Oct. 19, 2017, 2:01 p.m. UTC | #5
Hi Volodymyr,

On 17/10/17 19:57, Volodymyr Babchuk wrote:
> On Tue, Oct 17, 2017 at 06:30:13PM +0100, Julien Grall wrote:
> 
>>>> On 11/10/17 20:01, Volodymyr Babchuk wrote:
>>>>> Add basic OP-TEE mediator as an example how TEE mediator framework
>>>>> works.
>>>>>
>>>>> Currently it support only calls from Dom0. Calls from other guests
>>>>> will be declined. It maps OP-TEE static shared memory region into
>>>>> Dom0 address space, so Dom0 is the only domain which can work with
>>>>> older versions of OP-TEE.
>>>>>
>>>>> Also it alters SMC requests by\ adding domain id to request. OP-TEE
>>>>> can use this information to track requesters.
>>>>>
>>>>> Albeit being in early development stages, this mediator already can
>>>>> be used on systems where only Dom0 interacts with OP-TEE.
>>>>
>>>> A link to the spec would be useful here to be able to fully review this
>>>> patch.
>>> Which spec? OP-TEE protocol? It was added in previous commit.
>>
>> So basically you are saying the header is the documentation of the API?
>> There are not external documentation making easier to follow the version...?
> There are high-level documentation at [1]. All details are covered in headers.

Thanks.

> 
>>>
>>>>>
>>>>> It was tested on RCAR Salvator-M3 board.
>>>>
>>>> Is it with the stock op-tee? Or an updated version?
>>> Static SHM was tested with stock OP-TEE. Dynamic SHM was tested with
>>> my build. But my patches are already merged. OP-TEE 2.6.0 will support
>>> dynamic SHM out of the box.
>>>
>>>>>
>>>>> Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
>>>>> ---
>>>>>   xen/arch/arm/tee/Kconfig  |   4 ++
>>>>>   xen/arch/arm/tee/Makefile |   1 +
>>>>>   xen/arch/arm/tee/optee.c  | 178 ++++++++++++++++++++++++++++++++++++++++++++++
>>>>>   3 files changed, 183 insertions(+)
>>>>>   create mode 100644 xen/arch/arm/tee/optee.c
>>>>>
>>>>> diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
>>>>> index e69de29..7c6b5c6 100644
>>>>> --- a/xen/arch/arm/tee/Kconfig
>>>>> +++ b/xen/arch/arm/tee/Kconfig
>>>>> @@ -0,0 +1,4 @@
>>>>> +config ARM_OPTEE
>>>>> +	bool "Enable OP-TEE mediator"
>>>>> +	default n
>>>>> +	depends on ARM_TEE
>>>>> diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
>>>>> index c54d479..9d93b42 100644
>>>>> --- a/xen/arch/arm/tee/Makefile
>>>>> +++ b/xen/arch/arm/tee/Makefile
>>>>> @@ -1 +1,2 @@
>>>>>   obj-y += tee.o
>>>>> +obj-$(CONFIG_ARM_OPTEE) += optee.o
>>>>> diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
>>>>> new file mode 100644
>>>>> index 0000000..0220691
>>>>> --- /dev/null
>>>>> +++ b/xen/arch/arm/tee/optee.c
>>>>> @@ -0,0 +1,178 @@
>>>>> +/*
>>>>> + * xen/arch/arm/tee/optee.c
>>>>> + *
>>>>> + * OP-TEE mediator
>>>>> + *
>>>>> + * Volodymyr Babchuk <volodymyr_babchuk@epam.com>
>>>>> + * Copyright (c) 2017 EPAM Systems.
>>>>> + *
>>>>> + * This program is free software; you can redistribute it and/or modify
>>>>> + * it under the terms of the GNU General Public License version 2 as
>>>>> + * published by the Free Software Foundation.
>>>>> + *
>>>>> + * This program is distributed in the hope that it will be useful,
>>>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>>>>> + * GNU General Public License for more details.
>>>>> + */
>>>>> +
>>>>> +#include <xen/types.h>
>>>>> +#include <xen/sched.h>
>>>>> +
>>>>> +#include <asm/p2m.h>
>>>>> +#include <asm/tee.h>
>>>>> +
>>>>> +#include "optee_msg.h"
>>>>> +#include "optee_smc.h"
>>>>> +
>>>>> +/*
>>>>> + * OP-TEE violates SMCCC when it defines own UID. So we need
>>>>> + * to place bytes in correct order.
>>>>
>>>> Can you please point the paragraph in the spec where it says that?
>>> Sure.
>>>
>>>>> + */
>>>>> +#define OPTEE_UID  (xen_uuid_t){{                                               \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_0 >>  0), (uint8_t)(OPTEE_MSG_UID_0 >>  8),         \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_0 >> 16), (uint8_t)(OPTEE_MSG_UID_0 >> 24),         \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_1 >>  0), (uint8_t)(OPTEE_MSG_UID_1 >>  8),         \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_1 >> 16), (uint8_t)(OPTEE_MSG_UID_1 >> 24),         \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_2 >>  0), (uint8_t)(OPTEE_MSG_UID_2 >>  8),         \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_2 >> 16), (uint8_t)(OPTEE_MSG_UID_2 >> 24),         \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_3 >>  0), (uint8_t)(OPTEE_MSG_UID_3 >>  8),         \
>>>>> +    (uint8_t)(OPTEE_MSG_UID_3 >> 16), (uint8_t)(OPTEE_MSG_UID_3 >> 24),         \
>>>>> +    }}
>>>>> +
>>>>> +static int optee_init(void)
>>>>> +{
>>>>> +    printk("OP-TEE mediator init done\n");
>>>>> +    return 0;
>>>>> +}
>>>>> +
>>>>> +static void optee_domain_create(struct domain *d)
>>>>> +{
>>>>> +    /*
>>>>> +     * Do nothing at this time.
>>>>> +     * In the future this function will notify that new VM is started.
>>>>
>>>> You already have a new client with the hardware domain. So don't you already
>>>> need to notifity OP-TEE?
>>> Because currently OP-TEE does not support such notification.
>>>
>>>>> +     */
>>>>> +}
>>>>> +
>>>>> +static void optee_domain_destroy(struct domain *d)
>>>>> +{
>>>>> +    /*
>>>>> +     * Do nothing at this time.
>>>>> +     * In the future this function will notify that VM is being destroyed.
>>>>> +     */
>>>>
>>>> Same for the destruction?
>>> The same answer. OP-TEE currently can work with only one domain. I selected
>>> Dom0 for this.
>>>
>>>>> +}
>>>>> +
>>>>> +static bool forward_call(struct cpu_user_regs *regs)
>>>>> +{
>>>>> +    register_t resp[4];
>>>>> +
>>>>> +    call_smccc_smc(get_user_reg(regs, 0),
>>>>> +                   get_user_reg(regs, 1),
>>>>> +                   get_user_reg(regs, 2),
>>>>> +                   get_user_reg(regs, 3),
>>>>> +                   get_user_reg(regs, 4),
>>>>> +                   get_user_reg(regs, 5),
>>>>> +                   get_user_reg(regs, 6),
>>>>> +                   /* VM id 0 is reserved for hypervisor itself */
>>>>
>>>> s/VM/client/. Also, on your design document you mentioned that you did
>>>> modify OP-TEE to support multiple client ID. So how do you know whether the
>>>> TEE supports client ID?
>>> Hm, as I remember, I never mentioned that I modified OP-TEE to support
>>> multiple client IDs. This is my current task.
>>
>> "On OP-TEE side:
>> 1. Shared memory redesign which is almost complete.
>> 2. Implement new SMCs  from hypervsiror to TEE to track VM lifecycle.
>> 3. Track VM IDs to isolated VM data.
>> 4. RPCs to sleeping guests."
> Yes, this are my plans. First item is done. I'm currently working on
> others. Sorry, looks like I didn't clearly showed, that this is what should
> be done. It is not done yet.
> 
>> I was kind of expecting that was done given you put a client ID here. If it
>> is not done, then why are you passing a ID that we are not even sure OP-TEE
>> will be able to understand?
> OP-TEE has very rudimentary support of client ID [2]. So it will understand it.
> It uses client ID to ensure, that right VM does return from a RPC. There are
> no other uses for it right now.

I am not sure to understand what you mean here. Do you expect OP-TEE to 
block? Or send an interrupt later on to say the work is finish?

[...]

>>>
>>>>> +                   current->domain->domain_id +1,
>>>>> +                   resp);
>>>>> +
>>>>> +    set_user_reg(regs, 0, resp[0]);
>>>>> +    set_user_reg(regs, 1, resp[1]);
>>>>> +    set_user_reg(regs, 2, resp[2]);
>>>>> +    set_user_reg(regs, 3, resp[3]);
>>>>
>>>> Who will do the sanity check of the return values? Each callers? If so, I
>>>> would prefer that the results are stored in a temporary array and a separate
>>>> helpers will write them into the domain once the sanity is done.
>>> Maybe there will be cases when call will be forwarded straight to OP-TEE and
>>> nobody in hypervisor will examine returned result. At least, at this moment
>>> there are such cases. Probably, in full-scalle mediator this will no longer
>>> be true.
>>>
>>>> This would avoid to mistakenly expose unwanted data to a domain.
>>> Correct me, but set_user_reg() modifies data that will be stored in general
>>> purpose registers during return from trap handler. This can't expose any
>>> additional data to a domain.
>>
>> Which set_user_reg()? The helper does not do any modification... If you
>> speak about the code below, then it is very confusing and error-prone.
> No, I was speaking about code above. The one that calls set_user_reg().
> You leave your comment there, so I assumed you are talking about that part.
> 
>> If you separate the call from setting the guest registers then the you give
>> a hint to the caller that maybe something has to be down and he can't
>> blindly trust the result...
> Let me describe how this works right now. XEN traps SMC to OP-TEE and forwards
> it to the mediator. Mediator examines registers to determine type of the call.
> Then it either:
> 
>   * Forwards it to OP-TEE as is. This does forward_call(). forward_call()
>     executes real SMC and then writes return data to guest registers
> 
>   * Forwards it to OP-TEE and then examines result. Again, it uses
>     forward_call() to execute SMC and then it checks returned values.
> 
>     For example, if guest wanted to exchange capabilities with OP-TEE,
>     mediator checks if OP-TEE support dynamic SHM. If it is not supported
>     and guest is not hwdom, then mediator injects an error to guest.
>     This prevents further initialization of OP-TEE driver in client.
> 
>     Another example is static SHM configuration. If this request was sent
>     from hwdom, then upon return from OP-TEE, mediator maps static
>     shm into hwdom address space. Else it returns an error. DomU sees
>     that static SHM is not available and relies only on dynamic SHM.
> 
> Idea is to make this transparent for OP-TEE client driver. It does not
> need to know that it is running in virtualized environment (one
> backward-compatible change will be needed anyways, but this is
> another story).
> 
> Static SHM is a predefined region, that is shared by OP-TEE OS and OP-TEE client.
> Both sides expect that it is a physically contiguous memory region.
> Dynamic SHM is never thing. It allows OP-TEE client to use any portion of own
> memory as SHM. It was designed with virtualization in mind, so it support
> non-contiguous memory regions. Thus it can be used by DomU clients.
Thank you for the explanation, but I don't think this is addressing how 
this would prevent leaking data to the guest.

My request is to move the set_user_reg(...) calls outside of 
call_forward. So this would make clear the mediator needs to examine the 
result values.

To give you an example:

call_forward(....)
/* No need to sanitize value because... */
set_user_reg(...)
set_user_reg(...)

The caller may not need to examine the results. But at least it is clear 
compare to an helper hiding that.

Note that the set_user_reg(...) calls could in a another helper.

>>
>>>
>>>>> +
>>>>> +    return true;
>>>>> +}
>>>>> +
>>>>> +static bool handle_get_shm_config(struct cpu_user_regs *regs)
>>>>> +{
>>>>> +    paddr_t shm_start;
>>>>> +    size_t shm_size;
>>>>> +    int rc;
>>>>> +
>>>>> +    printk("handle_get_shm_config\n");
>>>>
>>>> No plain printk in code accessible by the guest. You should use gprintk or
>>>> ratelimit it.
>>> Sorry, this is a debug print. I'll remove it at all.
>>>
>>>>> +    /* Give all static SHM region to the Dom0 */
>>>>
>>>> s/Dom0/Hardware Domain/
>>> Hm, looks like Dom0 != hardware domain. At least I see code that replaces
>>> contents of hardware_domain variable. If it is possible, then there will
>>> be a problem with static SHM buffer.
>>
>> On Arm Dom0 == Hardware Domain. If Hardware Domain were introduced, then I
>> would expect OP-TEE to be handled by the it and not Dom0.
> Oh, I see. Thank you for explanation.
> 
>>>
>>> Looks like it is better to check for is_domain_direct_mapped(d), as you
>>> mentioned below.
>>
>> is_domain_direct_mapped(d) != hwdom. Please don't mix the both. The former
>> is here to proctect you gfn == mfn. The latter is here to make sure no other
>> domain than the hardware domain is going to use the shared memory.
> Yes, I see. As I said earlier, only 1:1 mapped domain can use static SHM
> mechanism. So I think I need to use is_domain_direct_mapped(d).

But if you use is_domain_direct_mapped(d) here, what will happen if two 
guests asked for shared memory?

> 
>>>
>>>> But I am not sure what's the point of this check given OP-TEE is only
>>>> supported for the Hardware Domain and you already have a check for that.
>>> Because I will remove outer check. But this check will remain. In this way
>>> older OP-TEEs (without virtualization support) will still be accessible
>> >from Dom0/HWDom.
>>>
>>>>> +    if ( current->domain->domain_id != 0 )
>>>>
>>>> Please use is_hardware_domain(current->domain) and not open-code the check.
>>>>
>>>>> +        return false;
>>>>> +
>>>>> +    forward_call(regs);
>>>>> +
>>>>> +    /* Return error back to the guest */
>>>>> +    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>>>> +        return true;
>>>>
>>>> This is quite confusing to read, I think it would make sense that
>>>> forward_call return the error.
>>> Good idea, thanks.
>>>
>>>>> +
>>>>> +    shm_start = get_user_reg(regs, 1);
>>>>> +    shm_size = get_user_reg(regs, 2);
>>>>> +
>>>>> +    /* Dom0 is mapped 1:1 */
>>>>
>>>> Please don't make this assumption or at least add
>>>> ASSERT(is_domain_direct_mapped(d));
>>> Thanks. I'll check this in runtime, as I mentioned above.
>>>
>>>>> +    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),
>>>>
>>>> Rather than using current->domain everywhere, I would prefer if you
>>>> introduce a temporary variable for the domain.
>>> Okay.
>>>
>>>>> +                          shm_size / PAGE_SIZE,
>>>>
>>>> Please PFN_DOWN(...).
>>>>
>>>>> +                          maddr_to_mfn(shm_start),
>>>>> +                          p2m_ram_rw);
>>>>
>>>> What is this shared memory for? I know this is the hardware domain, so using
>>>> p2m_ram_rw would be ok. But I don't think this would be safe unless TEE do
>>>> proper safety check.
>>> Linux kernel driver does memremap() in such place. OP-TEE maps it as non-secure
>>> RAM. This shared memory is used to pass information between OP-TEE OS
>>> and OP-TEE client. About which safety check you are talking?
>>
>> Well, does OP-TEE validate the data read from that shared region? But it
>> seems that you don't plan to give part of the SHM to a guest, so it might be
>> ok.
> OP-TEE surely validate all data from NW. Also OP-TEE is written in such way,
> that it reads from shared memory only once, to ensure that NW will not change
> data after validation. Mediator will do the same.

What do you mean by the last bit?

> 
>> Also how OP-TEE will map this region? Cacheable...?
> Yes, cacheable, PR, PW, non-secure.
> 
>>>
>>>>
>>>>> +    if ( rc < 0 )
>>>>> +    {
>>>>> +        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
>>>>
>>>> gprintk already dump the domid. So no need to say Dom0.
>>> I just wanted to emphasis that we mappaed memory for Dom0. Will remove.
>>
>> gprintk will printk d0. So there are no point to say it a second time...
>>>
>>>>> +        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
>>>>> +    }
>>>>> +
>>>>> +    return true;
>>>>> +}
>>>>> +
>>>>> +static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
>>>>> +{
>>>>> +        forward_call(regs);
>>>>> +
>>>>> +        printk("handle_exchange_capabilities\n");
>>>>
>>>> Same here, no plain prink.
>>> Sorry, this is another debug print. Missed it when formatted patches.
>>>
>>>>> +        /* Return error back to the guest */
>>>>> +        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>>>> +            return true;
>>>>> +
>>>>> +        /* Don't allow guests to work without dynamic SHM */
>>>>
>>>> Hmmm? But you don't support guests today. So why are you checking that?
>>> This is a RFC. Will remove this parts of the code in a proper patch series.
>>>
>>> I just wanted to ensure that community is okay with proposed approach and
>>> to show how minimalistic mediator can look.
>> I don't think this is true. You only show how easy it is to let Dom0
>> accessing TEE. And as I said in the cover letter, this is not the
>> controversial part.
> Actually I wanted to show approach when mediator resides right in xen.
> I got valuable input from you. Now I see that I must completely rework the
> first patch. And, probably, show more comprehensive support from OP-TEE side.
> 
>> The more controversial one is the guest support that you completely left
>> aside. I believe this part will not be as minimalistic as you think because
>> you need to translate buffer address and prevent those buffers to disappear
>> under your feet.
> Yes. I plan to copy all buffers where IPAs presented to another place,
> so DomU will not be able to see PAs during translation. And I plan to
> pin all DomU pages with a data. Also I'll read from guest pages only
> once. I think, this will be enough.
> 
>> There are probably other problem to fix...
> Probably yes...
> 
> I think, I'll focus on OP-TEE side right now and come back when there will
> be more more to show.

To clarify my view. I am not against a temporary support of OP-TEE for 
the hardware domain in Xen. But it does not mean I would be ready to see 
full OP-TEE support for guests in Xen.

Cheers,
Volodymyr Babchuk Oct. 19, 2017, 3:33 p.m. UTC | #6
On Thu, Oct 19, 2017 at 03:01:28PM +0100, Julien Grall wrote:

> Hi Volodymyr,
Hi Julien,

[...]
>>>>>>+}
>>>>>>+
>>>>>>+static bool forward_call(struct cpu_user_regs *regs)
>>>>>>+{
>>>>>>+    register_t resp[4];
>>>>>>+
>>>>>>+    call_smccc_smc(get_user_reg(regs, 0),
>>>>>>+                   get_user_reg(regs, 1),
>>>>>>+                   get_user_reg(regs, 2),
>>>>>>+                   get_user_reg(regs, 3),
>>>>>>+                   get_user_reg(regs, 4),
>>>>>>+                   get_user_reg(regs, 5),
>>>>>>+                   get_user_reg(regs, 6),
>>>>>>+                   /* VM id 0 is reserved for hypervisor itself */
>>>>>
>>>>>s/VM/client/. Also, on your design document you mentioned that you did
>>>>>modify OP-TEE to support multiple client ID. So how do you know whether the
>>>>>TEE supports client ID?
>>>>Hm, as I remember, I never mentioned that I modified OP-TEE to support
>>>>multiple client IDs. This is my current task.
>>>
>>>"On OP-TEE side:
>>>1. Shared memory redesign which is almost complete.
>>>2. Implement new SMCs  from hypervsiror to TEE to track VM lifecycle.
>>>3. Track VM IDs to isolated VM data.
>>>4. RPCs to sleeping guests."
>>Yes, this are my plans. First item is done. I'm currently working on
>>others. Sorry, looks like I didn't clearly showed, that this is what should
>>be done. It is not done yet.
>>
>>>I was kind of expecting that was done given you put a client ID here. If it
>>>is not done, then why are you passing a ID that we are not even sure OP-TEE
>>>will be able to understand?
>>OP-TEE has very rudimentary support of client ID [2]. So it will understand it.
>>It uses client ID to ensure, that right VM does return from a RPC. There are
>>no other uses for it right now.
> 
> I am not sure to understand what you mean here. Do you expect OP-TEE to
> block? Or send an interrupt later on to say the work is finish?
No, OP-TEE will never block in Secure World. The rough mechanism of RPCs is
described on page 19 of SMCCC. It is called a "yielding service call" in SMCCC.
OP-TEE uses the client id to ensure that a resume_call() from one VM does not mimic
a resume_call() from another VM. There are more checks, obviously. This is one
of them.

> [...]
> 
>>>>
>>>>>>+                   current->domain->domain_id +1,
>>>>>>+                   resp);
>>>>>>+
>>>>>>+    set_user_reg(regs, 0, resp[0]);
>>>>>>+    set_user_reg(regs, 1, resp[1]);
>>>>>>+    set_user_reg(regs, 2, resp[2]);
>>>>>>+    set_user_reg(regs, 3, resp[3]);
>>>>>
>>>>>Who will do the sanity check of the return values? Each callers? If so, I
>>>>>would prefer that the results are stored in a temporary array and a separate
>>>>>helpers will write them into the domain once the sanity is done.
>>>>Maybe there will be cases when call will be forwarded straight to OP-TEE and
>>>>nobody in hypervisor will examine returned result. At least, at this moment
>>>>there are such cases. Probably, in full-scalle mediator this will no longer
>>>>be true.
>>>>
>>>>>This would avoid to mistakenly expose unwanted data to a domain.
>>>>Correct me, but set_user_reg() modifies data that will be stored in general
>>>>purpose registers during return from trap handler. This can't expose any
>>>>additional data to a domain.
>>>
>>>Which set_user_reg()? The helper does not do any modification... If you
>>>speak about the code below, then it is very confusing and error-prone.
>>No, I was speaking about code above. The one that calls set_user_reg().
>>You leave your comment there, so I assumed you are talking about that part.
>>
>>>If you separate the call from setting the guest registers then the you give
>>>a hint to the caller that maybe something has to be down and he can't
>>>blindly trust the result...
>>Let me describe how this works right now. XEN traps SMC to OP-TEE and forwards
>>it to the mediator. Mediator examines registers to determine type of the call.
>>Then it either:
>>
>>  * Forwards it to OP-TEE as is. This does forward_call(). forward_call()
>>    executes real SMC and then writes return data to guest registers
>>
>>  * Forwards it to OP-TEE and then examines result. Again, it uses
>>    forward_call() to execute SMC and then it checks returned values.
>>
>>    For example, if guest wanted to exchange capabilities with OP-TEE,
>>    mediator checks if OP-TEE support dynamic SHM. If it is not supported
>>    and guest is not hwdom, then mediator injects an error to guest.
>>    This prevents further initialization of OP-TEE driver in client.
>>
>>    Another example is static SHM configuration. If this request was sent
>>    from hwdom, then upon return from OP-TEE, mediator maps static
>>    shm into hwdom address space. Else it returns an error. DomU sees
>>    that static SHM is not available and relies only on dynamic SHM.
>>
>>Idea is to make this transparent for OP-TEE client driver. It does not
>>need to know that it is running in virtualized environment (one
>>backward-compatible change will be needed anyways, but this is
>>another story).
>>
>>Static SHM is a predefined region, that is shared by OP-TEE OS and OP-TEE client.
>>Both sides expect that it is a physically contiguous memory region.
>>Dynamic SHM is never thing. It allows OP-TEE client to use any portion of own
>>memory as SHM. It was designed with virtualization in mind, so it support
>>non-contiguous memory regions. Thus it can be used by DomU clients.
> Thank you for the explanation, but I don't think this is addressing how this
> would prevent leaking data to the guest.
Oh, it just hit me. I change values in x0/r0 when I want to emit an error,
but I leave the other registers as is. This is a good point. Thank you.

> My request is to move the set_user_reg(...) calls outside of call_forward.
> So this would make clear the mediator needs to examine the result values.
Ah, I see. You suggest to rename forward_call() to something like execute_call()
and make it return result in some other way.

> To give you an example:
> 
> call_forward(....)
> /* No need to sanitize value because... */
> set_user_reg(...)
> set_user_reg(...)
> 
> The caller may not need to examine the results. But at least it is clear
> compare to an helper hiding that.
> 
> Note that the set_user_reg(...) calls could in a another helper.
Yep. So the new execute_call() does the actual SMC and returns the result in
some structure. If I need to return the result as is back to the VM, I call
another helper. Right?

>>>
>>>>
>>>>>>+
>>>>>>+    return true;
>>>>>>+}
>>>>>>+
>>>>>>+static bool handle_get_shm_config(struct cpu_user_regs *regs)
>>>>>>+{
>>>>>>+    paddr_t shm_start;
>>>>>>+    size_t shm_size;
>>>>>>+    int rc;
>>>>>>+
>>>>>>+    printk("handle_get_shm_config\n");
>>>>>
>>>>>No plain printk in code accessible by the guest. You should use gprintk or
>>>>>ratelimit it.
>>>>Sorry, this is a debug print. I'll remove it at all.
>>>>
>>>>>>+    /* Give all static SHM region to the Dom0 */
>>>>>
>>>>>s/Dom0/Hardware Domain/
>>>>Hm, looks like Dom0 != hardware domain. At least I see code that replaces
>>>>contents of hardware_domain variable. If it is possible, then there will
>>>>be a problem with static SHM buffer.
>>>
>>>On Arm Dom0 == Hardware Domain. If Hardware Domain were introduced, then I
>>>would expect OP-TEE to be handled by the it and not Dom0.
>>Oh, I see. Thank you for explanation.
>>
>>>>
>>>>Looks like it is better to check for is_domain_direct_mapped(d), as you
>>>>mentioned below.
>>>
>>>is_domain_direct_mapped(d) != hwdom. Please don't mix the both. The former
>>>is here to proctect you gfn == mfn. The latter is here to make sure no other
>>>domain than the hardware domain is going to use the shared memory.
>>Yes, I see. As I said earlier, only 1:1 mapped domain can use static SHM
>>mechanism. So I think I need to use is_domain_direct_mapped(d).
> 
> But if you use is_domain_direct_mapped(d) here, what will happen if two
> guests asked for shared memory?
Is it possible that there will be two 1:1 mapped domains in Xen? If yes,
then I need to employ both checks: is_domain_direct_mapped(d) &&
is_hardware_domain(d).

>>
>>>>
>>>>>But I am not sure what's the point of this check given OP-TEE is only
>>>>>supported for the Hardware Domain and you already have a check for that.
>>>>Because I will remove outer check. But this check will remain. In this way
>>>>older OP-TEEs (without virtualization support) will still be accessible
>>>>from Dom0/HWDom.
>>>>
>>>>>>+    if ( current->domain->domain_id != 0 )
>>>>>
>>>>>Please use is_hardware_domain(current->domain) and not open-code the check.
>>>>>
>>>>>>+        return false;
>>>>>>+
>>>>>>+    forward_call(regs);
>>>>>>+
>>>>>>+    /* Return error back to the guest */
>>>>>>+    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>>>>>+        return true;
>>>>>
>>>>>This is quite confusing to read, I think it would make sense that
>>>>>forward_call return the error.
>>>>Good idea, thanks.
>>>>
>>>>>>+
>>>>>>+    shm_start = get_user_reg(regs, 1);
>>>>>>+    shm_size = get_user_reg(regs, 2);
>>>>>>+
>>>>>>+    /* Dom0 is mapped 1:1 */
>>>>>
>>>>>Please don't make this assumption or at least add
>>>>>ASSERT(is_domain_direct_mapped(d));
>>>>Thanks. I'll check this in runtime, as I mentioned above.
>>>>
>>>>>>+    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),
>>>>>
>>>>>Rather than using current->domain everywhere, I would prefer if you
>>>>>introduce a temporary variable for the domain.
>>>>Okay.
>>>>
>>>>>>+                          shm_size / PAGE_SIZE,
>>>>>
>>>>>Please PFN_DOWN(...).
>>>>>
>>>>>>+                          maddr_to_mfn(shm_start),
>>>>>>+                          p2m_ram_rw);
>>>>>
>>>>>What is this shared memory for? I know this is the hardware domain, so using
>>>>>p2m_ram_rw would be ok. But I don't think this would be safe unless TEE do
>>>>>proper safety check.
>>>>Linux kernel driver does memremap() in such place. OP-TEE maps it as non-secure
>>>>RAM. This shared memory is used to pass information between OP-TEE OS
>>>>and OP-TEE client. About which safety check you are talking?
>>>
>>>Well, does OP-TEE validate the data read from that shared region? But it
>>>seems that you don't plan to give part of the SHM to a guest, so it might be
>>>ok.
>>OP-TEE surely validate all data from NW. Also OP-TEE is written in such way,
>>that it reads from shared memory only once, to ensure that NW will not change
>>data after validation. Mediator will do the same.
> 
> What do you mean by the last bit?
Let me cite TEE Internal Core API Specification (Public Release v1.1.1):

"
The fact that Memory References may use memory directly shared with
the client implies that the Trusted Application needs to be especially
careful when handling such data: Even if the client is not allowed to
access the shared memory buffer during an operation on this buffer,
the Trusted OS usually cannot enforce this restriction. A
badly-designed or rogue client may well change the content of the
shared memory buffer at any time, even between two consecutive memory
accesses by the Trusted Application. This means that the Trusted
Application needs to be carefully written to avoid any security
problem if this happens. If values in the buffer are security
critical, the Trusted Application SHOULD always read data only once
from a shared buffer and then validate it. It MUST NOT assume that
data written to the buffer can be read unchanged later on.
"

This requirement is for Trusted Applications, but it also true for
TEE OS as a whole and also for TEE mediators as well. I just wanted
to say, that I plan to write mediator in accordance to this.

>>
>>>Also how OP-TEE will map this region? Cacheable...?
>>Yes, cacheable, PR, PW, non-secure.
>>
>>>>
>>>>>
>>>>>>+    if ( rc < 0 )
>>>>>>+    {
>>>>>>+        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
>>>>>
>>>>>gprintk already dump the domid. So no need to say Dom0.
>>>>I just wanted to emphasis that we mappaed memory for Dom0. Will remove.
>>>
>>>gprintk will printk d0. So there are no point to say it a second time...
>>>>
>>>>>>+        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
>>>>>>+    }
>>>>>>+
>>>>>>+    return true;
>>>>>>+}
>>>>>>+
>>>>>>+static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
>>>>>>+{
>>>>>>+        forward_call(regs);
>>>>>>+
>>>>>>+        printk("handle_exchange_capabilities\n");
>>>>>
>>>>>Same here, no plain prink.
>>>>Sorry, this is another debug print. Missed it when formatted patches.
>>>>
>>>>>>+        /* Return error back to the guest */
>>>>>>+        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>>>>>+            return true;
>>>>>>+
>>>>>>+        /* Don't allow guests to work without dynamic SHM */
>>>>>
>>>>>Hmmm? But you don't support guests today. So why are you checking that?
>>>>This is a RFC. Will remove this parts of the code in a proper patch series.
>>>>
>>>>I just wanted to ensure that community is okay with proposed approach and
>>>>to show how minimalistic mediator can look.
>>>I don't think this is true. You only show how easy it is to let Dom0
>>>accessing TEE. And as I said in the cover letter, this is not the
>>>controversial part.
>>Actually I wanted to show approach when mediator resides right in xen.
>>I got valuable input from you. Now I see that I must completely rework the
>>first patch. And, probably, show more comprehensive support from OP-TEE side.
>>
>>>The more controversial one is the guest support that you completely left
>>>aside. I believe this part will not be as minimalistic as you think because
>>>you need to translate buffer address and prevent those buffers to disappear
>>>under your feet.
>>Yes. I plan to copy all buffers where IPAs presented to another place,
>>so DomU will not be able to see PAs during translation. And I plan to
>>pin all DomU pages with a data. Also I'll read from guest pages only
>>once. I think, this will be enough.
>>
>>>There are probably other problem to fix...
>>Probably yes...
>>
>>I think, I'll focus on OP-TEE side right now and come back when there will
>>be more more to show.
> 
> To clarify my view. I am not against a temporary support of OP-TEE for the
> hardware domain in Xen. But it does not mean I would be ready to see  the a
> full OP-TEE support for guests in Xen.
Hm. What did you mean in the last sentence? Our (here, at EPAM) target is full
virtualization support for OP-TEE. If you don't want to see it in Xen,
then what other ways do we have?

WBR, Volodymyr
Julien Grall Oct. 19, 2017, 4:12 p.m. UTC | #7
Hi Volodymyr,

On 19/10/17 16:33, Volodymyr Babchuk wrote:
> On Thu, Oct 19, 2017 at 03:01:28PM +0100, Julien Grall wrote:
>> My request is to move the set_user_reg(...) calls outside of call_forward.
>> So this would make clear the mediator needs to examine the result values.
> Ah, I see. You suggest to rename forward_call() to something like execute_call()
> and make it return result in some other way.
> 
>> To give you an example:
>>
>> call_forward(....)
>> /* No need to sanitize value because... */
>> set_user_reg(...)
>> set_user_reg(...)
>>
>> The caller may not need to examine the results. But at least it is clear
>> compare to an helper hiding that.
>>
>> Note that the set_user_reg(...) calls could in a another helper.
> Yep. So new executute_call() call does actuall SMC and returns result in
> some structure. If I need to return result as is back to VM, I call another
> helper. Right?

That's right.

> 
>>>>
>>>>>
>>>>>>> +
>>>>>>> +    return true;
>>>>>>> +}
>>>>>>> +
>>>>>>> +static bool handle_get_shm_config(struct cpu_user_regs *regs)
>>>>>>> +{
>>>>>>> +    paddr_t shm_start;
>>>>>>> +    size_t shm_size;
>>>>>>> +    int rc;
>>>>>>> +
>>>>>>> +    printk("handle_get_shm_config\n");
>>>>>>
>>>>>> No plain printk in code accessible by the guest. You should use gprintk or
>>>>>> ratelimit it.
>>>>> Sorry, this is a debug print. I'll remove it at all.
>>>>>
>>>>>>> +    /* Give all static SHM region to the Dom0 */
>>>>>>
>>>>>> s/Dom0/Hardware Domain/
>>>>> Hm, looks like Dom0 != hardware domain. At least I see code that replaces
>>>>> contents of hardware_domain variable. If it is possible, then there will
>>>>> be a problem with static SHM buffer.
>>>>
>>>> On Arm Dom0 == Hardware Domain. If Hardware Domain were introduced, then I
>>>> would expect OP-TEE to be handled by the it and not Dom0.
>>> Oh, I see. Thank you for explanation.
>>>
>>>>>
>>>>> Looks like it is better to check for is_domain_direct_mapped(d), as you
>>>>> mentioned below.
>>>>
>>>> is_domain_direct_mapped(d) != hwdom. Please don't mix the both. The former
>>>> is here to proctect you gfn == mfn. The latter is here to make sure no other
>>>> domain than the hardware domain is going to use the shared memory.
>>> Yes, I see. As I said earlier, only 1:1 mapped domain can use static SHM
>>> mechanism. So I think I need to use is_domain_direct_mapped(d).
>>
>> But if you use is_domain_direct_mapped(d) here, what will happen if two
>> guests asked for shared memory?
> Is is possible that there will be two 1:1 mapped domains in XEN? If yes,
> then I need to employ both checks: is_domain_direct_mapped(d) &&
> is_hardware_domain(d).

At the moment only the hardware domain is mapped 1:1. But I don't want 
to make that assumption in the code. For instance, I know that one of 
your colleagues was looking at guest mapped 1:1.

> 
>>>
>>>>>
>>>>>> But I am not sure what's the point of this check given OP-TEE is only
>>>>>> supported for the Hardware Domain and you already have a check for that.
>>>>> Because I will remove outer check. But this check will remain. In this way
>>>>> older OP-TEEs (without virtualization support) will still be accessible
>>>> >from Dom0/HWDom.
>>>>>
>>>>>>> +    if ( current->domain->domain_id != 0 )
>>>>>>
>>>>>> Please use is_hardware_domain(current->domain) and not open-code the check.
>>>>>>
>>>>>>> +        return false;
>>>>>>> +
>>>>>>> +    forward_call(regs);
>>>>>>> +
>>>>>>> +    /* Return error back to the guest */
>>>>>>> +    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>>>>>> +        return true;
>>>>>>
>>>>>> This is quite confusing to read, I think it would make sense that
>>>>>> forward_call return the error.
>>>>> Good idea, thanks.
>>>>>
>>>>>>> +
>>>>>>> +    shm_start = get_user_reg(regs, 1);
>>>>>>> +    shm_size = get_user_reg(regs, 2);
>>>>>>> +
>>>>>>> +    /* Dom0 is mapped 1:1 */
>>>>>>
>>>>>> Please don't make this assumption or at least add
>>>>>> ASSERT(is_domain_direct_mapped(d));
>>>>> Thanks. I'll check this in runtime, as I mentioned above.
>>>>>
>>>>>>> +    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),
>>>>>>
>>>>>> Rather than using current->domain everywhere, I would prefer if you
>>>>>> introduce a temporary variable for the domain.
>>>>> Okay.
>>>>>
>>>>>>> +                          shm_size / PAGE_SIZE,
>>>>>>
>>>>>> Please PFN_DOWN(...).
>>>>>>
>>>>>>> +                          maddr_to_mfn(shm_start),
>>>>>>> +                          p2m_ram_rw);
>>>>>>
>>>>>> What is this shared memory for? I know this is the hardware domain, so using
>>>>>> p2m_ram_rw would be ok. But I don't think this would be safe unless TEE do
>>>>>> proper safety check.
>>>>> Linux kernel driver does memremap() in such place. OP-TEE maps it as non-secure
>>>>> RAM. This shared memory is used to pass information between OP-TEE OS
>>>>> and OP-TEE client. About which safety check you are talking?
>>>>
>>>> Well, does OP-TEE validate the data read from that shared region? But it
>>>> seems that you don't plan to give part of the SHM to a guest, so it might be
>>>> ok.
>>> OP-TEE surely validate all data from NW. Also OP-TEE is written in such way,
>>> that it reads from shared memory only once, to ensure that NW will not change
>>> data after validation. Mediator will do the same.
>>
>> What do you mean by the last bit?
> Let me cite TEE Internal Core API Specification (Public Release v1.1.1):
> 
> "
> The fact that Memory References may use memory directly shared with
> the client implies that the Trusted Application needs to be especially
> careful when handling such data: Even if the client is not allowed to
> access the shared memory buffer during an operation on this buffer,
> the Trusted OS usually cannot enforce this restriction. A
> badly-designed or rogue client may well change the content of the
> shared memory buffer at any time, even between two consecutive memory
> accesses by the Trusted Application. This means that the Trusted
> Application needs to be carefully written to avoid any security
> problem if this happens. If values in the buffer are security
> critical, the Trusted Application SHOULD always read data only once
> from a shared buffer and then validate it. It MUST NOT assume that
> data written to the buffer can be read unchanged later on.
> "
> 
> This requirement is for Trusted Applications, but it also true for
> TEE OS as a whole and also for TEE mediators as well. I just wanted
> to say, that I plan to write mediator in accordance to this.

It makes sense. Thank you for the explanation.

> 
>>>
>>>> Also how OP-TEE will map this region? Cacheable...?
>>> Yes, cacheable, PR, PW, non-secure.
>>>
>>>>>
>>>>>>
>>>>>>> +    if ( rc < 0 )
>>>>>>> +    {
>>>>>>> +        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
>>>>>>
>>>>>> gprintk already dump the domid. So no need to say Dom0.
>>>>> I just wanted to emphasis that we mappaed memory for Dom0. Will remove.
>>>>
>>>> gprintk will printk d0. So there are no point to say it a second time...
>>>>>
>>>>>>> +        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
>>>>>>> +    }
>>>>>>> +
>>>>>>> +    return true;
>>>>>>> +}
>>>>>>> +
>>>>>>> +static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
>>>>>>> +{
>>>>>>> +        forward_call(regs);
>>>>>>> +
>>>>>>> +        printk("handle_exchange_capabilities\n");
>>>>>>
>>>>>> Same here, no plain prink.
>>>>> Sorry, this is another debug print. Missed it when formatted patches.
>>>>>
>>>>>>> +        /* Return error back to the guest */
>>>>>>> +        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>>>>>> +            return true;
>>>>>>> +
>>>>>>> +        /* Don't allow guests to work without dynamic SHM */
>>>>>>
>>>>>> Hmmm? But you don't support guests today. So why are you checking that?
>>>>> This is a RFC. Will remove this parts of the code in a proper patch series.
>>>>>
>>>>> I just wanted to ensure that community is okay with proposed approach and
>>>>> to show how minimalistic mediator can look.
>>>> I don't think this is true. You only show how easy it is to let Dom0
>>>> accessing TEE. And as I said in the cover letter, this is not the
>>>> controversial part.
>>> Actually I wanted to show approach when mediator resides right in xen.
>>> I got valuable input from you. Now I see that I must completely rework the
>>> first patch. And, probably, show more comprehensive support from OP-TEE side.
>>>
>>>> The more controversial one is the guest support that you completely left
>>>> aside. I believe this part will not be as minimalistic as you think because
>>>> you need to translate buffer address and prevent those buffers to disappear
>>>> under your feet.
>>> Yes. I plan to copy all buffers where IPAs presented to another place,
>>> so DomU will not be able to see PAs during translation. And I plan to
>>> pin all DomU pages with a data. Also I'll read from guest pages only
>>> once. I think, this will be enough.
>>>
>>>> There are probably other problem to fix...
>>> Probably yes...
>>>
>>> I think, I'll focus on OP-TEE side right now and come back when there will
>>> be more more to show.
>>
>> To clarify my view. I am not against a temporary support of OP-TEE for the
>> hardware domain in Xen. But it does not mean I would be ready to see  the a
>> full OP-TEE support for guests in Xen.
> Hm. What did you mean in last sentence? Our (here, at EPAM) target is full
> virtualization support for OP-TEE. If you don't want to see it in Xen,
> then what another ways we have?

Sorry, it was not clear enough. I meant that whilst I am happy to see 
OP-TEE support for the hardware domain in the hypervisor, we still need 
to discuss the approach for guests.

Cheers,
Volodymyr Babchuk Oct. 19, 2017, 4:37 p.m. UTC | #8
On Thu, Oct 19, 2017 at 05:12:17PM +0100, Julien Grall wrote:

Hi Julien,

> >>>>>>>+    if ( rc < 0 )
> >>>>>>>+    {
> >>>>>>>+        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
> >>>>>>
> >>>>>>gprintk already dump the domid. So no need to say Dom0.
> >>>>>I just wanted to emphasis that we mappaed memory for Dom0. Will remove.
> >>>>
> >>>>gprintk will printk d0. So there are no point to say it a second time...
> >>>>>
> >>>>>>>+        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
> >>>>>>>+    }
> >>>>>>>+
> >>>>>>>+    return true;
> >>>>>>>+}
> >>>>>>>+
> >>>>>>>+static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
> >>>>>>>+{
> >>>>>>>+        forward_call(regs);
> >>>>>>>+
> >>>>>>>+        printk("handle_exchange_capabilities\n");
> >>>>>>
> >>>>>>Same here, no plain prink.
> >>>>>Sorry, this is another debug print. Missed it when formatted patches.
> >>>>>
> >>>>>>>+        /* Return error back to the guest */
> >>>>>>>+        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
> >>>>>>>+            return true;
> >>>>>>>+
> >>>>>>>+        /* Don't allow guests to work without dynamic SHM */
> >>>>>>
> >>>>>>Hmmm? But you don't support guests today. So why are you checking that?
> >>>>>This is a RFC. Will remove this parts of the code in a proper patch series.
> >>>>>
> >>>>>I just wanted to ensure that community is okay with proposed approach and
> >>>>>to show how minimalistic mediator can look.
> >>>>I don't think this is true. You only show how easy it is to let Dom0
> >>>>accessing TEE. And as I said in the cover letter, this is not the
> >>>>controversial part.
> >>>Actually I wanted to show approach when mediator resides right in xen.
> >>>I got valuable input from you. Now I see that I must completely rework the
> >>>first patch. And, probably, show more comprehensive support from OP-TEE side.
> >>>
> >>>>The more controversial one is the guest support that you completely left
> >>>>aside. I believe this part will not be as minimalistic as you think because
> >>>>you need to translate buffer address and prevent those buffers to disappear
> >>>>under your feet.
> >>>Yes. I plan to copy all buffers where IPAs presented to another place,
> >>>so DomU will not be able to see PAs during translation. And I plan to
> >>>pin all DomU pages with a data. Also I'll read from guest pages only
> >>>once. I think, this will be enough.
> >>>
> >>>>There are probably other problem to fix...
> >>>Probably yes...
> >>>
> >>>I think, I'll focus on OP-TEE side right now and come back when there will
> >>>be more to show.
> >>
> >>To clarify my view. I am not against a temporary support of OP-TEE for the
> >>hardware domain in Xen. But it does not mean I would be ready to see a
> >>full OP-TEE support for guests in Xen.
> >Hm. What did you mean in last sentence? Our (here, at EPAM) target is full
> >virtualization support for OP-TEE. If you don't want to see it in Xen,
> >then what another ways we have?
> 
> Sorry it was not clear enough. I meant that whilst I am happy to see OP-TEE
> support for the hardware domain in the hypervisor, we still need to discuss
> on the approach for guests.
Excuse me, I still didn't get it. You imply that we need some
completely different approach for guests? Or I can stick with current
approach, just add more restrictions?

Under "current approach" I mostly mean "handle SMCs to TEE at EL2" as
opposed to "handle them in stubdom". Half patches of this RFC should
be severely reworked anyways.

WBR, Volodymyr
Julien Grall Oct. 19, 2017, 4:52 p.m. UTC | #9
Hi,

On 19/10/17 17:37, Volodymyr Babchuk wrote:
> On Thu, Oct 19, 2017 at 05:12:17PM +0100, Julien Grall wrote:
> 
> Hi Julien,
> 
>>>>>>>>> +    if ( rc < 0 )
>>>>>>>>> +    {
>>>>>>>>> +        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
>>>>>>>>
>>>>>>>> gprintk already dump the domid. So no need to say Dom0.
>>>>>>> I just wanted to emphasize that we mapped memory for Dom0. Will remove.
>>>>>>
>>>>>> gprintk will printk d0. So there are no point to say it a second time...
>>>>>>>
>>>>>>>>> +        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
>>>>>>>>> +    }
>>>>>>>>> +
>>>>>>>>> +    return true;
>>>>>>>>> +}
>>>>>>>>> +
>>>>>>>>> +static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
>>>>>>>>> +{
>>>>>>>>> +        forward_call(regs);
>>>>>>>>> +
>>>>>>>>> +        printk("handle_exchange_capabilities\n");
>>>>>>>>
>>>>>>>> Same here, no plain prink.
>>>>>>> Sorry, this is another debug print. Missed it when formatted patches.
>>>>>>>
>>>>>>>>> +        /* Return error back to the guest */
>>>>>>>>> +        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
>>>>>>>>> +            return true;
>>>>>>>>> +
>>>>>>>>> +        /* Don't allow guests to work without dynamic SHM */
>>>>>>>>
>>>>>>>> Hmmm? But you don't support guests today. So why are you checking that?
>>>>>>> This is a RFC. Will remove this parts of the code in a proper patch series.
>>>>>>>
>>>>>>> I just wanted to ensure that community is okay with proposed approach and
>>>>>>> to show how minimalistic mediator can look.
>>>>>> I don't think this is true. You only show how easy it is to let Dom0
>>>>>> accessing TEE. And as I said in the cover letter, this is not the
>>>>>> controversial part.
>>>>> Actually I wanted to show approach when mediator resides right in xen.
>>>>> I got valuable input from you. Now I see that I must completely rework the
>>>>> first patch. And, probably, show more comprehensive support from OP-TEE side.
>>>>>
>>>>>> The more controversial one is the guest support that you completely left
>>>>>> aside. I believe this part will not be as minimalistic as you think because
>>>>>> you need to translate buffer address and prevent those buffers to disappear
>>>>>> under your feet.
>>>>> Yes. I plan to copy all buffers where IPAs presented to another place,
>>>>> so DomU will not be able to see PAs during translation. And I plan to
>>>>> pin all DomU pages with a data. Also I'll read from guest pages only
>>>>> once. I think, this will be enough.
>>>>>
>>>>>> There are probably other problem to fix...
>>>>> Probably yes...
>>>>>
>>>>> I think, I'll focus on OP-TEE side right now and come back when there will
>>>>> be more to show.
>>>>
>>>> To clarify my view. I am not against a temporary support of OP-TEE for the
>>>> hardware domain in Xen. But it does not mean I would be ready to see a
>>>> full OP-TEE support for guests in Xen.
>>> Hm. What did you mean in last sentence? Our (here, at EPAM) target is full
>>> virtualization support for OP-TEE. If you don't want to see it in Xen,
>>> then what another ways we have?
>>
>> Sorry it was not clear enough. I meant that whilst I am happy to see OP-TEE
>> support for the hardware domain in the hypervisor, we still need to discuss
>> on the approach for guests.
> Excuse me, I still didn't get it. You imply that we need some
> completely different approach for guests? Or I can stick with current
> approach, just add more restrictions?
> 
> Under "current approach" I mostly mean "handle SMCs to TEE at EL2" as
> opposed to "handle them in stubdom". Half patches of this RFC should
> be severely reworked anyways.

Let me answer on your cover letter. That would be easier to draw a 
decision with your last e-mail.

Cheers,
diff mbox

Patch

diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
index e69de29..7c6b5c6 100644
--- a/xen/arch/arm/tee/Kconfig
+++ b/xen/arch/arm/tee/Kconfig
@@ -0,0 +1,4 @@ 
+config ARM_OPTEE
+	bool "Enable OP-TEE mediator"
+	default n
+	depends on ARM_TEE
diff --git a/xen/arch/arm/tee/Makefile b/xen/arch/arm/tee/Makefile
index c54d479..9d93b42 100644
--- a/xen/arch/arm/tee/Makefile
+++ b/xen/arch/arm/tee/Makefile
@@ -1 +1,2 @@ 
 obj-y += tee.o
+obj-$(CONFIG_ARM_OPTEE) += optee.o
diff --git a/xen/arch/arm/tee/optee.c b/xen/arch/arm/tee/optee.c
new file mode 100644
index 0000000..0220691
--- /dev/null
+++ b/xen/arch/arm/tee/optee.c
@@ -0,0 +1,178 @@ 
+/*
+ * xen/arch/arm/tee/optee.c
+ *
+ * OP-TEE mediator
+ *
+ * Volodymyr Babchuk <volodymyr_babchuk@epam.com>
+ * Copyright (c) 2017 EPAM Systems.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <xen/types.h>
+#include <xen/sched.h>
+
+#include <asm/p2m.h>
+#include <asm/tee.h>
+
+#include "optee_msg.h"
+#include "optee_smc.h"
+
/*
 * OP-TEE publishes its UID as four 32-bit words (OPTEE_MSG_UID_0..3)
 * whose byte order differs from what SMCCC mandates for UID queries,
 * so reassemble the bytes of each word LSB-first into the xen_uuid_t
 * used to match this mediator against the firmware's UID.
 */
#define OPTEE_UID  (xen_uuid_t){{                                               \
    (uint8_t)(OPTEE_MSG_UID_0 >>  0), (uint8_t)(OPTEE_MSG_UID_0 >>  8),         \
    (uint8_t)(OPTEE_MSG_UID_0 >> 16), (uint8_t)(OPTEE_MSG_UID_0 >> 24),         \
    (uint8_t)(OPTEE_MSG_UID_1 >>  0), (uint8_t)(OPTEE_MSG_UID_1 >>  8),         \
    (uint8_t)(OPTEE_MSG_UID_1 >> 16), (uint8_t)(OPTEE_MSG_UID_1 >> 24),         \
    (uint8_t)(OPTEE_MSG_UID_2 >>  0), (uint8_t)(OPTEE_MSG_UID_2 >>  8),         \
    (uint8_t)(OPTEE_MSG_UID_2 >> 16), (uint8_t)(OPTEE_MSG_UID_2 >> 24),         \
    (uint8_t)(OPTEE_MSG_UID_3 >>  0), (uint8_t)(OPTEE_MSG_UID_3 >>  8),         \
    (uint8_t)(OPTEE_MSG_UID_3 >> 16), (uint8_t)(OPTEE_MSG_UID_3 >> 24),         \
    }}
+
+static int optee_init(void)
+{
+    printk("OP-TEE mediator init done\n");
+    return 0;
+}
+
/*
 * Per-domain construction hook.  Deliberately empty for now: in the
 * future it will notify OP-TEE that a new VM has been started
 * (until then @d is unused).
 */
static void optee_domain_create(struct domain *d)
{
    /*
     * Do nothing at this time.
     * In the future this function will notify that new VM is started.
     */
}
+
/*
 * Per-domain teardown hook.  Deliberately empty for now: in the
 * future it will notify OP-TEE that a VM is going away so it can
 * release any per-VM resources (until then @d is unused).
 */
static void optee_domain_destroy(struct domain *d)
{
    /*
     * Do nothing at this time.
     * In the future this function will notify that VM is being destroyed.
     */
}
+
+static bool forward_call(struct cpu_user_regs *regs)
+{
+    register_t resp[4];
+
+    call_smccc_smc(get_user_reg(regs, 0),
+                   get_user_reg(regs, 1),
+                   get_user_reg(regs, 2),
+                   get_user_reg(regs, 3),
+                   get_user_reg(regs, 4),
+                   get_user_reg(regs, 5),
+                   get_user_reg(regs, 6),
+                   /* VM id 0 is reserved for hypervisor itself */
+                   current->domain->domain_id +1,
+                   resp);
+
+    set_user_reg(regs, 0, resp[0]);
+    set_user_reg(regs, 1, resp[1]);
+    set_user_reg(regs, 2, resp[2]);
+    set_user_reg(regs, 3, resp[3]);
+
+    return true;
+}
+
+static bool handle_get_shm_config(struct cpu_user_regs *regs)
+{
+    paddr_t shm_start;
+    size_t shm_size;
+    int rc;
+
+    printk("handle_get_shm_config\n");
+    /* Give all static SHM region to the Dom0 */
+    if ( current->domain->domain_id != 0 )
+        return false;
+
+    forward_call(regs);
+
+    /* Return error back to the guest */
+    if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
+        return true;
+
+    shm_start = get_user_reg(regs, 1);
+    shm_size = get_user_reg(regs, 2);
+
+    /* Dom0 is mapped 1:1 */
+    rc = map_regions_p2mt(current->domain, gaddr_to_gfn(shm_start),
+                          shm_size / PAGE_SIZE,
+                          maddr_to_mfn(shm_start),
+                          p2m_ram_rw);
+    if ( rc < 0 )
+    {
+        gprintk(XENLOG_INFO, "OP-TEE: Can't map static shm for Dom0: %d", rc);
+        set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
+    }
+
+    return true;
+}
+
+static bool handle_exchange_capabilities(struct cpu_user_regs *regs)
+{
+        forward_call(regs);
+
+        printk("handle_exchange_capabilities\n");
+        /* Return error back to the guest */
+        if ( get_user_reg(regs, 0) != OPTEE_SMC_RETURN_OK)
+            return true;
+
+        /* Don't allow guests to work without dynamic SHM */
+        if (current->domain->domain_id != 0 &&
+            !(get_user_reg(regs, 1) & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) )
+            set_user_reg(regs, 0, OPTEE_SMC_RETURN_ENOTAVAIL);
+        return true;
+}
+
+static bool optee_handle_smc(struct cpu_user_regs *regs)
+{
+    /* At this time we support only calls from Dom0. */
+    if ( current->domain->domain_id != 0 )
+        return false;
+
+    switch ( get_user_reg(regs, 0) )
+    {
+    case OPTEE_SMC_GET_SHM_CONFIG:
+        return handle_get_shm_config(regs);
+    case OPTEE_SMC_EXCHANGE_CAPABILITIES:
+        return handle_exchange_capabilities(regs);
+    default:
+        return forward_call(regs);
+    }
+    return true;
+}
+
/*
 * Mediator teardown hook.  Nothing to release yet: this mediator
 * holds no global state of its own.
 */
static void optee_remove(void)
{
}
+
/* Hooks exposed to the generic TEE mediator framework. */
static const struct tee_mediator_ops optee_ops =
{
    .init = optee_init,
    .domain_create = optee_domain_create,
    .domain_destroy = optee_domain_destroy,
    .handle_smc = optee_handle_smc,
    .remove = optee_remove,
};

/* Register this mediator; matched against firmware advertising the OP-TEE UID. */
REGISTER_TEE_MEDIATOR(optee, "OP-TEE", OPTEE_UID, &optee_ops);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */