@@ -1001,6 +1001,69 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
+/*
+ * optee_smc_lend_rstmem() - Lend a restricted memory buffer to secure world
+ * @optee:	main service struct
+ * @rstmem:	tee_shm object describing the physically contiguous buffer
+ *		being lent (paddr/size are passed as a TMEM parameter)
+ * @end_points:	list of endpoint IDs that should be granted access
+ * @ep_count:	number of entries in @end_points
+ * @use_case:	restricted-memory use case identifier, passed to secure
+ *		world as value parameter 0
+ *
+ * On success the tee_shm cookie pointer is recorded in both the message
+ * (shm_ref) and @rstmem->sec_world_id so the buffer can be identified in
+ * the later reclaim call.
+ *
+ * NOTE(review): @end_points and @ep_count are currently unused — the
+ * endpoint list is never encoded into the message, so secure world only
+ * sees the use case. Confirm whether this ABI revision is expected to
+ * carry the endpoints, or whether they are implied by @use_case.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int optee_smc_lend_rstmem(struct optee *optee, struct tee_shm *rstmem,
+				 u16 *end_points, unsigned int ep_count,
+				 u32 use_case)
+{
+	struct optee_shm_arg_entry *entry;
+	struct optee_msg_arg *msg_arg;
+	struct tee_shm *shm;
+	u_int offs;
+	int rc;
+
+	/* Two parameters: the use case value and the lent buffer itself */
+	msg_arg = optee_get_msg_arg(optee->ctx, 2, &entry, &shm, &offs);
+	if (IS_ERR(msg_arg))
+		return PTR_ERR(msg_arg);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_LEND_RSTMEM;
+	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+	msg_arg->params[0].u.value.a = use_case;
+	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+	msg_arg->params[1].u.tmem.buf_ptr = rstmem->paddr;
+	msg_arg->params[1].u.tmem.size = rstmem->size;
+	/* The tee_shm pointer doubles as the opaque cookie for reclaim */
+	msg_arg->params[1].u.tmem.shm_ref = (u_long)rstmem;
+
+	rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+	if (rc)
+		goto out;
+	/* Any TEEC error from secure world is collapsed to -EINVAL */
+	if (msg_arg->ret != TEEC_SUCCESS) {
+		rc = -EINVAL;
+		goto out;
+	}
+	rstmem->sec_world_id = (u_long)rstmem;
+
+out:
+	optee_free_msg_arg(optee->ctx, entry, offs);
+	return rc;
+}
+
+/*
+ * optee_smc_reclaim_rstmem() - Reclaim a previously lent restricted buffer
+ * @optee:	main service struct
+ * @rstmem:	the tee_shm object passed to optee_smc_lend_rstmem() earlier;
+ *		its pointer value is the shm_ref cookie secure world uses to
+ *		look the buffer up
+ *
+ * Counterpart of optee_smc_lend_rstmem(). Note that @rstmem->sec_world_id
+ * is not cleared here; the caller is presumably responsible for that —
+ * TODO confirm against the caller in rstmem teardown.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int optee_smc_reclaim_rstmem(struct optee *optee, struct tee_shm *rstmem)
+{
+	struct optee_shm_arg_entry *entry;
+	struct optee_msg_arg *msg_arg;
+	struct tee_shm *shm;
+	u_int offs;
+	int rc;
+
+	/* Single parameter: the registered-memory reference to give back */
+	msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+	if (IS_ERR(msg_arg))
+		return PTR_ERR(msg_arg);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_RECLAIM_RSTMEM;
+	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+	/* Must match the shm_ref cookie set in the lend call */
+	msg_arg->params[0].u.rmem.shm_ref = (u_long)rstmem;
+
+	rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+	if (rc)
+		goto out;
+	if (msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+
+out:
+	optee_free_msg_arg(optee->ctx, entry, offs);
+	return rc;
+}
+
/*
* 5. Asynchronous notification
*/
@@ -1252,6 +1315,8 @@ static const struct optee_ops optee_ops = {
.do_call_with_arg = optee_smc_do_call_with_arg,
.to_msg_param = optee_to_msg_param,
.from_msg_param = optee_from_msg_param,
+ .lend_rstmem = optee_smc_lend_rstmem,
+ .reclaim_rstmem = optee_smc_reclaim_rstmem,
};
static int enable_async_notif(optee_invoke_fn *invoke_fn)
@@ -1622,11 +1687,13 @@ static inline int optee_load_fw(struct platform_device *pdev,
 static int optee_sdp_pool_init(struct optee *optee)
 {
+	/* Static SDP carve-out advertised by secure world */
+	bool sdp = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_SDP;
+	/* Dynamically allocated (CMA-backed) restricted memory support */
+	bool dyn_sdp = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_RSTMEM;
 	enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
-	struct tee_rstmem_pool *pool;
-	int rc;
+	/*
+	 * Start out in the "failed" state so the fallback chain below only
+	 * acts on a pool that was actually allocated, and the final pr_err
+	 * fires when neither path produced a registered heap.
+	 */
+	struct tee_rstmem_pool *pool = ERR_PTR(-EINVAL);
+	int rc = -EINVAL;
-	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_SDP) {
+	if (sdp) {
 		union {
 			struct arm_smccc_res smccc;
 			struct optee_smc_get_sdp_config_result result;
@@ -1634,25 +1701,24 @@ static int optee_sdp_pool_init(struct optee *optee)
 		optee->smc.invoke_fn(OPTEE_SMC_GET_SDP_CONFIG, 0, 0, 0, 0, 0, 0,
 				     0, &res.smccc);
-		if (res.result.status != OPTEE_SMC_RETURN_OK) {
-			pr_err("Secure Data Path service not available\n");
-			return 0;
-		}
+		/* Prefer the static carve-out when secure world reports one */
+		if (res.result.status == OPTEE_SMC_RETURN_OK)
+			pool = tee_rstmem_static_pool_alloc(res.result.start,
+							    res.result.size);
+	}
-	pool = tee_rstmem_static_pool_alloc(res.result.start,
-					    res.result.size);
-	if (IS_ERR(pool))
-		return PTR_ERR(pool);
+	/*
+	 * Fall back to a dynamic CMA pool if the static pool was unavailable
+	 * or failed to allocate. NOTE(review): the original PTR_ERR(pool)
+	 * value is discarded on this path; only the generic message below is
+	 * reported.
+	 */
+	if (dyn_sdp && IS_ERR(pool))
+		pool = optee_rstmem_alloc_cma_pool(optee, heap_id);
+	if (!IS_ERR(pool)) {
 		rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool);
 		if (rc)
-			goto err;
+			pool->ops->destroy_pool(pool);
 	}
+	/* Only complain if SDP was advertised in some form but failed */
+	if (rc && (sdp || dyn_sdp))
+		pr_err("Secure Data Path service not available\n");
+
+	/* Failures here are deliberately non-fatal for driver probe */
 	return 0;
-err:
-	pool->ops->destroy_pool(pool);
-	return rc;
 }
static int optee_probe(struct platform_device *pdev)
Add support in the OP-TEE backend driver for dynamic restricted memory
allocation using the SMC ABI.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
---
 drivers/tee/optee/smc_abi.c | 96 +++++++++++++++++++++++++++++++------
 1 file changed, 81 insertions(+), 15 deletions(-)