Currently, locking on verbs objects can be disabled only globally, through
the MLX5_SINGLE_THREADED environment variable. New verbs, such as
ibv_alloc_td, let the application guarantee that certain verbs objects are
accessed by only one user thread, eliminating the need for a lock on those
objects. This patch lets the driver toggle locking on individual verbs
objects.

Signed-off-by: Rohit Zambre <rzambre@uci.edu>
---
 providers/mlx5/mlx5.c  |  4 ++--
 providers/mlx5/mlx5.h  |  8 +++++---
 providers/mlx5/verbs.c | 12 ++++++------
 3 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/providers/mlx5/mlx5.c b/providers/mlx5/mlx5.c
@@ -1121,7 +1121,7 @@ static struct verbs_context *mlx5_alloc_context(struct ibv_device *ibdev,
context->bfs[bfi].reg = context->uar[i].reg + MLX5_ADAPTER_PAGE_SIZE * j +
MLX5_BF_OFFSET + k * context->bf_reg_size;
context->bfs[bfi].need_lock = need_uuar_lock(context, bfi);
- mlx5_spinlock_init(&context->bfs[bfi].lock);
+ mlx5_spinlock_init(&context->bfs[bfi].lock, context->bfs[bfi].need_lock);
context->bfs[bfi].offset = 0;
if (bfi)
context->bfs[bfi].buf_size = context->bf_reg_size / 2;
@@ -1153,7 +1153,7 @@ static struct verbs_context *mlx5_alloc_context(struct ibv_device *ibdev,
mlx5_read_env(ibdev, context);
- mlx5_spinlock_init(&context->hugetlb_lock);
+ mlx5_spinlock_init(&context->hugetlb_lock, !mlx5_single_threaded);
list_head_init(&context->hugetlb_list);
verbs_set_ops(v_ctx, &mlx5_ctx_common_ops);
diff --git a/providers/mlx5/mlx5.h b/providers/mlx5/mlx5.h
@@ -207,6 +207,7 @@ struct mlx5_db_page;
struct mlx5_spinlock {
pthread_spinlock_t lock;
int in_use;
+ int need_lock;
};
enum mlx5_uar_type {
@@ -844,7 +845,7 @@ static inline void *mlx5_find_uidx(struct mlx5_context *ctx, uint32_t uidx)
static inline int mlx5_spin_lock(struct mlx5_spinlock *lock)
{
- if (!mlx5_single_threaded)
+ if (lock->need_lock)
return pthread_spin_lock(&lock->lock);
if (unlikely(lock->in_use)) {
@@ -866,7 +867,7 @@ static inline int mlx5_spin_lock(struct mlx5_spinlock *lock)
static inline int mlx5_spin_unlock(struct mlx5_spinlock *lock)
{
- if (!mlx5_single_threaded)
+ if (lock->need_lock)
return pthread_spin_unlock(&lock->lock);
lock->in_use = 0;
@@ -874,9 +875,10 @@ static inline int mlx5_spin_unlock(struct mlx5_spinlock *lock)
return 0;
}
-static inline int mlx5_spinlock_init(struct mlx5_spinlock *lock)
+static inline int mlx5_spinlock_init(struct mlx5_spinlock *lock, int need_lock)
{
lock->in_use = 0;
+ lock->need_lock = need_lock;
return pthread_spin_init(&lock->lock, PTHREAD_PROCESS_PRIVATE);
}
diff --git a/providers/mlx5/verbs.c b/providers/mlx5/verbs.c
@@ -645,7 +645,7 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
memset(&cmd, 0, sizeof cmd);
cq->cons_index = 0;
- if (mlx5_spinlock_init(&cq->lock))
+ if (mlx5_spinlock_init(&cq->lock, !mlx5_single_threaded))
goto err;
ncqe = align_queue_size(cq_attr->cqe + 1);
@@ -907,7 +907,7 @@ struct ibv_srq *mlx5_create_srq(struct ibv_pd *pd,
ibsrq = &srq->vsrq.srq;
memset(&cmd, 0, sizeof cmd);
- if (mlx5_spinlock_init(&srq->lock)) {
+ if (mlx5_spinlock_init(&srq->lock, !mlx5_single_threaded)) {
fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
goto err;
}
@@ -1751,8 +1751,8 @@ static struct ibv_qp *create_qp(struct ibv_context *context,
mlx5_init_qp_indices(qp);
- if (mlx5_spinlock_init(&qp->sq.lock) ||
- mlx5_spinlock_init(&qp->rq.lock))
+ if (mlx5_spinlock_init(&qp->sq.lock, !mlx5_single_threaded) ||
+ mlx5_spinlock_init(&qp->rq.lock, !mlx5_single_threaded))
goto err_free_qp_buf;
qp->db = mlx5_alloc_dbrec(ctx);
@@ -2495,7 +2495,7 @@ struct ibv_srq *mlx5_create_srq_ex(struct ibv_context *context,
memset(&cmd, 0, sizeof(cmd));
memset(&resp, 0, sizeof(resp));
- if (mlx5_spinlock_init(&msrq->lock)) {
+ if (mlx5_spinlock_init(&msrq->lock, !mlx5_single_threaded)) {
fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
goto err;
}
@@ -2799,7 +2799,7 @@ static struct ibv_wq *create_wq(struct ibv_context *context,
mlx5_init_rwq_indices(rwq);
- if (mlx5_spinlock_init(&rwq->rq.lock))
+ if (mlx5_spinlock_init(&rwq->rq.lock, !mlx5_single_threaded))
goto err_free_rwq_buf;
rwq->db = mlx5_alloc_dbrec(ctx);
Currently, the locks on the verbs objects can be unset only globally through the MLX5_SINGLE_THREADED environment variable. New verbs, such as ibv_alloc_td, allow the application to guarantee the access to certain verbs objects from only one user thread, eliminating the need for a lock on those objects. This patch allows the driver to toggle locking on individual verbs objects. Signed-off-by: Rohit Zambre <rzambre@uci.edu> --- providers/mlx5/mlx5.c | 4 ++-- providers/mlx5/mlx5.h | 8 +++++--- providers/mlx5/verbs.c | 12 ++++++------ 3 files changed, 13 insertions(+), 11 deletions(-)