diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -427,6 +427,19 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = adreno_gpu_state_get(gpu);
+
+ if (IS_ERR(state))
+ return state;
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -453,6 +466,8 @@ static const struct adreno_gpu_funcs funcs = {
#ifdef CONFIG_DEBUG_FS
.show = a3xx_show,
#endif
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -465,6 +465,18 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
}
#endif

+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = adreno_gpu_state_get(gpu);
+
+ if (IS_ERR(state))
+ return state;
+
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
+}
+
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
@@ -541,6 +553,8 @@ static const struct adreno_gpu_funcs funcs = {
#ifdef CONFIG_DEBUG_FS
.show = a4xx_show,
#endif
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
.get_timestamp = a4xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1195,6 +1195,26 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}

+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state;
+
+ /*
+ * Temporarily disable hardware clock gating before capturing the
+ * state to avoid issues while reading the registers
+ */
+ a5xx_set_hwcg(gpu, false);
+
+ state = adreno_gpu_state_get(gpu);
+
+ if (!IS_ERR(state))
+ state->rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ a5xx_set_hwcg(gpu, true);
+
+ return state;
+}
+
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
@@ -1244,6 +1264,8 @@ static const struct adreno_gpu_funcs funcs = {
.debugfs_init = a5xx_debugfs_init,
#endif
.gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -368,6 +368,60 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return false;
}

+struct msm_gpu_state *adreno_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_gpu_state *state;
+ int i, count = 0;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ do_gettimeofday(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+ }
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ uint32_t start = adreno_gpu->registers[i];
+ uint32_t end = adreno_gpu->registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return state;
+}
+
+void adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ kfree(state->registers);
+ kfree(state);
+}
+
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -228,6 +228,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *gpu);

+struct msm_gpu_state *adreno_gpu_state_get(struct msm_gpu *gpu);
+void adreno_gpu_state_put(struct msm_gpu_state *state);
+
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
@@ -27,6 +27,7 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_state;

struct msm_gpu_config {
const char *ioname;
@@ -69,6 +70,8 @@ struct msm_gpu_funcs {
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+ void (*gpu_state_put)(struct msm_gpu_state *state);
};

struct msm_gpu {
@@ -175,6 +178,22 @@ struct msm_gpu_submitqueue {
struct kref ref;
};

+struct msm_gpu_state {
+ struct timeval time;
+
+ struct {
+ u32 fence;
+ u32 seqno;
+ u32 rptr;
+ u32 wptr;
+ } ring[MSM_GPU_MAX_RINGS];
+
+ int nr_registers;
+ u32 *registers;
+
+ u32 rbbm_status;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
Add the infrastructure to capture the current state of the GPU and
store it in memory. This is useful for storing the state of a hung GPU
so it can be dumped later. For now, grab the same basic ringbuffer
information and registers that are provided by the debugfs 'gpu' node,
but obviously this can be extended to capture a much larger set of GPU
information.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
---
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c   | 15 +++++++++
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c   | 14 +++++++++
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c   | 22 ++++++++++++++
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 54 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/msm/adreno/adreno_gpu.h |  3 ++
 drivers/gpu/drm/msm/msm_gpu.h           | 19 ++++++++++++
 6 files changed, 127 insertions(+)
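For illustration only, not part of this patch: a hypothetical caller in
the msm core (for example a future hang handler) could pair the new
gpu_state_get/gpu_state_put hooks roughly as sketched below. The helper
name example_capture_crash_state() is made up for this example; only
gpu->funcs and the two new hooks come from the patch itself.

#include <linux/err.h>

#include "msm_gpu.h"

/* Hypothetical helper -- shows the intended get/put pairing */
static void example_capture_crash_state(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state;

	/* Not every GPU generation has to implement state capture */
	if (!gpu->funcs->gpu_state_get)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR(state))
		return;

	/* ... stash or dump 'state' here before releasing it ... */

	gpu->funcs->gpu_state_put(state);
}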