@@ -22,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -40,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -75,7 +75,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -129,7 +129,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
- gpu->cur_ctx_seqno = 0;
+ ring->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -164,7 +164,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -109,7 +109,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
+ if (ctx->seqno == ring->cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -219,7 +219,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -305,7 +305,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -843,6 +843,7 @@ static int hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u64 gmem_range_min;
+ unsigned int i;
int ret;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@@ -1138,7 +1139,8 @@ static int hw_init(struct msm_gpu *gpu)
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- gpu->cur_ctx_seqno = 0;
+ for (i = 0; i < gpu->nr_rings; i++)
+ gpu->rb[i]->cur_ctx_seqno = 0;
/* Enable the SQE_to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -783,7 +783,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+ submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
@@ -193,17 +193,6 @@ struct msm_gpu {
*/
refcount_t sysprof_active;
- /**
- * cur_ctx_seqno:
- *
- * The ctx->seqno value of the last context to submit rendering,
- * and the one with current pgtables installed (for generations
- * that support per-context pgtables). Tracked by seqno rather
- * than pointer value to avoid dangling pointers, and cases where
- * a ctx can be freed and a new one created with the same address.
- */
- int cur_ctx_seqno;
-
/**
* lock:
*
@@ -100,6 +100,16 @@ struct msm_ringbuffer {
* preemption. Can be aquired from irq context.
*/
spinlock_t preempt_lock;
+
+ /**
+ * cur_ctx_seqno:
+ *
+	 * The ctx->seqno value of the last context to submit to this ring.
+ * Tracked by seqno rather than pointer value to avoid dangling
+ * pointers, and cases where a ctx can be freed and a new one created
+ * with the same address.
+ */
+ int cur_ctx_seqno;
};
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
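
Note on the seqno-vs-pointer rationale in the comment above: the snippet below is a minimal, self-contained userspace sketch, not kernel code; the struct layouts and the needs_ctx_restore() helper are invented purely for illustration. It shows why the last submitting context is tracked per ring by seqno rather than by pointer: a freed context can be reallocated at the same address, so a pointer compare could wrongly report "same context" and skip the restore, while a monotonically increasing seqno never repeats.

/* Hypothetical illustration only -- not part of the patch. */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	unsigned int seqno;		/* unique per context, never reused */
};

struct ring {
	unsigned int cur_ctx_seqno;	/* last context that submitted on this ring */
};

/* invented helper: mirrors the "skip CTX_RESTORE on same context" check */
static int needs_ctx_restore(struct ring *ring, struct ctx *ctx)
{
	return ring->cur_ctx_seqno != ctx->seqno;
}

int main(void)
{
	static unsigned int next_seqno = 1;
	struct ring ring = { .cur_ctx_seqno = 0 };

	struct ctx *a = malloc(sizeof(*a));
	a->seqno = next_seqno++;

	printf("first submit, restore needed: %d\n", needs_ctx_restore(&ring, a));
	ring.cur_ctx_seqno = a->seqno;

	free(a);
	struct ctx *b = malloc(sizeof(*b));	/* may land at the same address as 'a' */
	b->seqno = next_seqno++;

	/* a pointer compare could claim "same context" here; the seqno compare cannot */
	printf("new ctx at possibly reused address, restore needed: %d\n",
	       needs_ctx_restore(&ring, b));

	free(b);
	return 0;
}

Because seqnos only grow and are never recycled, the per-ring compare stays correct even across free/reallocate cycles, which is exactly the case a pointer compare would get wrong.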