@@ -365,6 +365,13 @@ struct ffa_ctx {
struct list_head shm_list;
/* Number of allocated shared memory object */
unsigned int shm_count;
+ /*
+ * tx_lock is used to serialize access to tx
+ * rx_lock is used to serialize access to rx
+ * lock is used for the rest in this struct
+ */
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
spinlock_t lock;
};
@@ -815,7 +822,9 @@ static int32_t handle_partition_info_get(uint32_t w1, uint32_t w2, uint32_t w3,
if ( !ffa_rx )
return FFA_RET_DENIED;
- spin_lock(&ctx->lock);
+ if ( !spin_trylock(&ctx->rx_lock) )
+ return FFA_RET_BUSY;
+
if ( !ctx->page_count || !ctx->rx_is_free )
goto out;
spin_lock(&ffa_rx_buffer_lock);
@@ -866,7 +875,7 @@ out_rx_release:
out_rx_buf_unlock:
spin_unlock(&ffa_rx_buffer_lock);
out:
- spin_unlock(&ctx->lock);
+ spin_unlock(&ctx->rx_lock);
return ret;
}
@@ -877,13 +886,15 @@ static int32_t handle_rx_release(void)
struct domain *d = current->domain;
struct ffa_ctx *ctx = d->arch.tee;
- spin_lock(&ctx->lock);
+ if ( !spin_trylock(&ctx->rx_lock) )
+ return FFA_RET_BUSY;
+
if ( !ctx->page_count || ctx->rx_is_free )
goto out;
ret = FFA_RET_OK;
ctx->rx_is_free = true;
out:
- spin_unlock(&ctx->lock);
+ spin_unlock(&ctx->rx_lock);
return ret;
}
@@ -994,21 +1005,43 @@ static void put_shm_pages(struct ffa_shm_mem *shm)
}
}
+static bool inc_ctx_shm_count(struct ffa_ctx *ctx)
+{
+ bool ret = true;
+
+ spin_lock(&ctx->lock);
+ if ( ctx->shm_count >= FFA_MAX_SHM_COUNT )
+ ret = false;
+ else
+ ctx->shm_count++;
+ spin_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static void dec_ctx_shm_count(struct ffa_ctx *ctx)
+{
+ spin_lock(&ctx->lock);
+ ASSERT(ctx->shm_count > 0);
+ ctx->shm_count--;
+ spin_unlock(&ctx->lock);
+}
+
static struct ffa_shm_mem *alloc_ffa_shm_mem(struct ffa_ctx *ctx,
unsigned int page_count)
{
struct ffa_shm_mem *shm;
- if ( page_count >= FFA_MAX_SHM_PAGE_COUNT ||
- ctx->shm_count >= FFA_MAX_SHM_COUNT )
+ if ( page_count >= FFA_MAX_SHM_PAGE_COUNT )
+ return NULL;
+ if ( !inc_ctx_shm_count(ctx) )
return NULL;
shm = xzalloc_flex_struct(struct ffa_shm_mem, pages, page_count);
if ( shm )
- {
- ctx->shm_count++;
shm->page_count = page_count;
- }
+ else
+ dec_ctx_shm_count(ctx);
return shm;
}
@@ -1018,8 +1051,7 @@ static void free_ffa_shm_mem(struct ffa_ctx *ctx, struct ffa_shm_mem *shm)
if ( !shm )
return;
- ASSERT(ctx->shm_count > 0);
- ctx->shm_count--;
+ dec_ctx_shm_count(ctx);
put_shm_pages(shm);
xfree(shm);
}
@@ -1299,7 +1331,11 @@ static void handle_mem_share(struct cpu_user_regs *regs)
goto out_set_ret;
}
- spin_lock(&ctx->lock);
+ if ( !spin_trylock(&ctx->tx_lock) )
+ {
+ ret = FFA_RET_BUSY;
+ goto out_set_ret;
+ }
if ( frag_len > ctx->page_count * FFA_PAGE_SIZE )
goto out_unlock;
@@ -1421,7 +1457,9 @@ static void handle_mem_share(struct cpu_user_regs *regs)
if ( ret )
goto out;
+ spin_lock(&ctx->lock);
list_add_tail(&shm->list, &ctx->shm_list);
+ spin_unlock(&ctx->lock);
uint64_to_regpair(&handle_hi, &handle_lo, shm->handle);
@@ -1429,7 +1467,7 @@ out:
if ( ret )
free_ffa_shm_mem(ctx, shm);
out_unlock:
- spin_unlock(&ctx->lock);
+ spin_unlock(&ctx->tx_lock);
out_set_ret:
if ( ret > 0 )
@@ -1464,7 +1502,12 @@ static void handle_mem_frag_tx(struct cpu_user_regs *regs)
uint16_t sender_id = 0;
int ret;
- spin_lock(&ctx->lock);
+ if ( !spin_trylock(&ctx->tx_lock) )
+ {
+ ret = FFA_RET_BUSY;
+ goto out_set_ret;
+ }
+
s = find_frag_state(ctx, handle);
if ( !s )
{
@@ -1489,15 +1532,20 @@ static void handle_mem_frag_tx(struct cpu_user_regs *regs)
spin_unlock(&ffa_tx_buffer_lock);
if ( ret < 0 )
goto out_free_s;
+
+ spin_lock(&ctx->lock);
list_add_tail(&s->shm->list, &ctx->shm_list);
+ spin_unlock(&ctx->lock);
+
out_free_s:
if ( ret < 0 )
free_ffa_shm_mem(ctx, s->shm);
list_del(&s->list);
xfree(s);
out:
- spin_unlock(&ctx->lock);
+ spin_unlock(&ctx->tx_lock);
+out_set_ret:
if ( ret > 0 )
set_regs_frag_rx(regs, handle_lo, handle_hi, ret, sender_id);
else if ( ret == 0)
@@ -1506,6 +1554,18 @@ out:
set_regs_error(regs, ret);
}
+/* Must only be called with ctx->lock held */
+static struct ffa_shm_mem *find_shm_mem(struct ffa_ctx *ctx, uint64_t handle)
+{
+ struct ffa_shm_mem *shm;
+
+ list_for_each_entry(shm, &ctx->shm_list, list)
+ if ( shm->handle == handle )
+ return shm;
+
+ return NULL;
+}
+
static int handle_mem_reclaim(uint64_t handle, uint32_t flags)
{
struct domain *d = current->domain;
@@ -1516,29 +1576,26 @@ static int handle_mem_reclaim(uint64_t handle, uint32_t flags)
int ret;
spin_lock(&ctx->lock);
- list_for_each_entry(shm, &ctx->shm_list, list)
- {
- if ( shm->handle == handle )
- goto found_it;
- }
- shm = NULL;
- ret = FFA_RET_INVALID_PARAMETERS;
- goto out;
-found_it:
+ shm = find_shm_mem(ctx, handle);
+ if ( shm )
+ list_del(&shm->list);
+ spin_unlock(&ctx->lock);
+ if ( !shm )
+ return FFA_RET_INVALID_PARAMETERS;
uint64_to_regpair(&handle_hi, &handle_lo, handle);
ret = ffa_mem_reclaim(handle_lo, handle_hi, flags);
+
if ( ret )
{
- shm = NULL;
- goto out;
+ spin_lock(&ctx->lock);
+ list_add_tail(&shm->list, &ctx->shm_list);
+ spin_unlock(&ctx->lock);
+ }
+ else
+ {
+ free_ffa_shm_mem(ctx, shm);
}
-
- list_del(&shm->list);
-
-out:
- free_ffa_shm_mem(ctx, shm);
- spin_unlock(&ctx->lock);
return ret;
}
The single lock in struct ffa_ctx is complemented with rx_lock and
tx_lock. The old lock is kept for short critical sections, like
increasing shm_count or adding another shm to shm_list.

rx_lock and tx_lock are only acquired with spin_trylock(), which for
well-behaved guests should always succeed. Guests using the RX and TX
buffers are expected to serialize access to them before making an FF-A
request.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
---
 xen/arch/arm/tee/ffa.c | 121 ++++++++++++++++++++++++++++++-----------
 1 file changed, 89 insertions(+), 32 deletions(-)
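
For reviewers, here is a small stand-alone sketch of the locking split the
patch describes. It is illustrative only, not the Xen code: pthread mutexes
stand in for Xen spinlocks, struct ctx_sketch is a cut-down stand-in for
struct ffa_ctx, and the FFA_RET_* values and FFA_MAX_SHM_COUNT below are
placeholders rather than the real FF-A constants.

/*
 * Sketch of the locking scheme: trylock-only RX/TX locks plus a plain
 * lock for short bookkeeping sections. Assumes pthread mutexes as a
 * stand-in for spinlocks; all constants are placeholders.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FFA_RET_OK                    0
#define FFA_RET_BUSY                (-1)  /* placeholder value */
#define FFA_RET_INVALID_PARAMETERS  (-2)  /* placeholder value */
#define FFA_MAX_SHM_COUNT             32  /* placeholder limit */

struct ctx_sketch {
    pthread_mutex_t rx_lock;  /* serializes the guest's RX buffer */
    pthread_mutex_t tx_lock;  /* serializes the guest's TX buffer */
    pthread_mutex_t lock;     /* protects the bookkeeping fields below */
    unsigned int shm_count;
    bool rx_is_free;
};

/*
 * RX/TX paths only try to take their lock: a guest that serializes its
 * own buffer use never sees contention, while a misbehaving guest gets
 * BUSY back instead of keeping a pCPU spinning inside the hypervisor.
 */
static int rx_release_sketch(struct ctx_sketch *ctx)
{
    int ret = FFA_RET_INVALID_PARAMETERS;

    if (pthread_mutex_trylock(&ctx->rx_lock) != 0)
        return FFA_RET_BUSY;

    if (!ctx->rx_is_free) {
        ctx->rx_is_free = true;  /* hand the RX buffer back */
        ret = FFA_RET_OK;
    }

    pthread_mutex_unlock(&ctx->rx_lock);
    return ret;
}

/*
 * The plain lock is only held across a few instructions of bookkeeping,
 * so it can remain an ordinary blocking lock.
 */
static bool inc_shm_count_sketch(struct ctx_sketch *ctx)
{
    bool ok = true;

    pthread_mutex_lock(&ctx->lock);
    if (ctx->shm_count >= FFA_MAX_SHM_COUNT)
        ok = false;
    else
        ctx->shm_count++;
    pthread_mutex_unlock(&ctx->lock);

    return ok;
}

int main(void)
{
    struct ctx_sketch ctx = {
        .rx_lock = PTHREAD_MUTEX_INITIALIZER,
        .tx_lock = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .rx_is_free = false,
    };

    printf("rx release: %d\n", rx_release_sketch(&ctx));
    printf("shm slot reserved: %d\n", inc_shm_count_sketch(&ctx));
    return 0;
}

The point of the split is visible in the two helpers: the RX/TX locks guard
guest-visible buffers whose use the guest is supposed to serialize anyway, so
returning BUSY on contention costs a correct guest nothing, while the plain
lock never protects more than a counter update or a list operation and so
cannot cause long spins.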