b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -281,10 +281,49 @@ static uint32_t ring_freewords(struct msm_gpu *gpu)
return (rptr + (size - 1) - wptr) % size;
}
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring_contiguous(struct msm_gpu *gpu,
+		uint32_t ndwords)
{
- if (spin_until(ring_freewords(gpu) >= ndwords))
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	uint32_t size = gpu->rb->size / 4;
+	uint32_t wptr;
+	uint32_t rptr;
+
+	/* Wait for enough free space, then check whether it is contiguous */
+	if (spin_until(ring_freewords(gpu) >= ndwords)) {
+		DRM_ERROR("%s: timeout waiting for ringbuffer space\n",
+				gpu->name);
+		return;
+	}
+
+	wptr = get_wptr(gpu->rb);
+	rptr = adreno_gpu->memptrs->rptr;
+
+	/* We have enough space in the ring for ndwords. Any of three
+	 * conditions indicates that the space is also contiguous:
+	 * (1) wptr == size: the ring has wrapped and the next OUT_RING
+	 *     starts writing at offset 0 again (see OUT_RING), so the
+	 *     free space there is contiguous.
+	 * (2) wptr < rptr: the free space between wptr and rptr is
+	 *     contiguous.
+	 * (3) wptr + ndwords < size - 1: ndwords fit before the end of
+	 *     the ring.
+	 */
+	if ((wptr == size) || (wptr < rptr) || (wptr + ndwords < size - 1))
+		return;
+
+	/* Not contiguous: fill the rest of the ring with a no-op packet
+	 * and wrap around to the start.
+	 */
+	OUT_RING(gpu->rb, CP_TYPE3_PKT | (((size - wptr - 1) - 1) << 16) |
+			((CP_NOP & 0xFF) << 8));
+	gpu->rb->cur = gpu->rb->start;
+
+	/* cur has been reset to start. If ring_freewords() now returns at
+	 * least ndwords, the free space is contiguous.
+	 */
+	if (spin_until(ring_freewords(gpu) >= ndwords)) {
+		DRM_ERROR("%s: timeout waiting for ringbuffer space\n",
+				gpu->name);
+		return;
+	}
}
static const char *iommu_ports[] = {
b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -247,7 +247,7 @@ void adreno_idle(struct msm_gpu *gpu);
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring_contiguous(struct msm_gpu *gpu, uint32_t ndwords);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
@@ -259,7 +259,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu);
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring_contiguous(ring->gpu, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
@@ -267,14 +267,14 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
- adreno_wait_ring(ring->gpu, 1);
+ adreno_wait_ring_contiguous(ring->gpu, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring_contiguous(ring->gpu, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
Splitting the command sequence for an IB1 submission at the end of the
ring buffer can hang the GPU. To fix this, if there isn't enough
contiguous space at the end to fit the full command sequence, insert
NOPs at the end, and write the sequence at the start, as space becomes
available.

Signed-off-by: Aravind Ganesan <aravindg@codeaurora.org>
---
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 45 ++++++++++++++++++++++++++++++---
 drivers/gpu/drm/msm/adreno/adreno_gpu.h |  8 +++---
 2 files changed, 46 insertions(+), 7 deletions(-)
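For reference, the wrap-with-NOPs idea can be exercised outside the driver
with a small stand-alone model. This is only a sketch under simplifying
assumptions: toy_ring, toy_freewords() and toy_make_contiguous() are
hypothetical names, the packet-encoding constants are shown only for
illustration, and the spinning/timeout and locking that the driver does are
omitted.

#include <stdint.h>
#include <stdio.h>

/* Packet encoding in the style of the adreno CP headers (illustrative). */
#define CP_TYPE3_PKT	(3u << 30)
#define CP_NOP		0x10u

/* Hypothetical stand-alone model of the ring; all sizes are in dwords. */
struct toy_ring {
	uint32_t buf[64];
	uint32_t size;		/* total ring size */
	uint32_t wptr;		/* next host write position */
	uint32_t rptr;		/* last position consumed by the CP */
};

/* Free dwords overall, same formula as ring_freewords() in the driver. */
static uint32_t toy_freewords(const struct toy_ring *r)
{
	return (r->rptr + (r->size - 1) - r->wptr) % r->size;
}

/*
 * Make sure the next ndwords can be written without wrapping: if the tail
 * of the ring is too short, fill it with a single CP_NOP packet and move
 * wptr back to the start.
 */
static void toy_make_contiguous(struct toy_ring *r, uint32_t ndwords)
{
	uint32_t tail = r->size - r->wptr;

	/* The same three "already contiguous" cases as in the patch. */
	if (r->wptr == r->size || r->wptr < r->rptr ||
	    r->wptr + ndwords < r->size - 1)
		return;

	/* NOP header whose payload (tail - 1 dwords, left untouched)
	 * covers the rest of the ring; assumes tail >= 2. */
	r->buf[r->wptr] = CP_TYPE3_PKT | ((tail - 2) << 16) |
			  ((CP_NOP & 0xFF) << 8);
	r->wptr = 0;
}

int main(void)
{
	struct toy_ring r = { .size = 64, .wptr = 60, .rptr = 20 };

	/* 10 dwords fit overall (23 free) but not in the 4-dword tail,
	 * so the tail is padded and writing continues at offset 0. */
	if (toy_freewords(&r) >= 10)
		toy_make_contiguous(&r, 10);

	printf("free=%u wptr=%u\n", toy_freewords(&r), r.wptr);
	return 0;
}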