@@ -565,6 +565,8 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo);
/* v3d_sched.c */
void __v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *qinfo,
				     unsigned int count);
+void __v3d_performance_query_info_free(struct v3d_performance_query_info *qinfo,
+				       unsigned int count);
void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
@@ -87,20 +87,30 @@ __v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *qinfo,
	}
}
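+/*
+ * Drop the syncobj references held by the first @count performance queries
+ * and free the queries array, if one was allocated.
+ */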
+void
+__v3d_performance_query_info_free(struct v3d_performance_query_info *qinfo,
+				   unsigned int count)
+{
+	if (qinfo->queries) {
+		unsigned int i;
+
+		for (i = 0; i < count; i++)
+			drm_syncobj_put(qinfo->queries[i].syncobj);
+
+		kvfree(qinfo->queries);
+	}
+}
+
static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
-	struct v3d_performance_query_info *performance_query = &job->performance_query;
	__v3d_timestamp_query_info_free(&job->timestamp_query,
					job->timestamp_query.count);
-	if (performance_query->queries) {
-		for (int i = 0; i < performance_query->count; i++)
-			drm_syncobj_put(performance_query->queries[i].syncobj);
-		kvfree(performance_query->queries);
-	}
+	__v3d_performance_query_info_free(&job->performance_query,
+					  job->performance_query.count);
	v3d_job_cleanup(&job->base);
}
@@ -645,6 +645,7 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
	struct drm_v3d_reset_performance_query reset;
	struct v3d_performance_query_info *qinfo = &job->performance_query;
	unsigned int i, j;
+	int err;
	if (!job) {
		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -680,32 +681,36 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
		u32 id;
		if (get_user(sync, syncs++)) {
-			kvfree(qinfo->queries);
-			return -EFAULT;
+			err = -EFAULT;
+			goto error;
		}
-		qinfo->queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
		if (get_user(ids, kperfmon_ids++)) {
-			kvfree(qinfo->queries);
-			return -EFAULT;
+			err = -EFAULT;
+			goto error;
		}
		ids_pointer = u64_to_user_ptr(ids);
		for (j = 0; j < reset.nperfmons; j++) {
			if (get_user(id, ids_pointer++)) {
-				kvfree(qinfo->queries);
-				return -EFAULT;
+				err = -EFAULT;
+				goto error;
			}
			qinfo->queries[i].kperfmon_ids[j] = id;
		}
+
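+		/*
+		 * Take the syncobj reference only after every copy from
+		 * userspace for this entry has succeeded, so the error path
+		 * below only has to release fully initialised entries.
+		 */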
+		qinfo->queries[i].syncobj = drm_syncobj_find(file_priv, sync);
	}
	qinfo->count = reset.count;
	qinfo->nperfmons = reset.nperfmons;
	return 0;
+
+error:
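+	/* Put the syncobjs gathered so far and free the queries array. */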
+	__v3d_performance_query_info_free(qinfo, i);
+	return err;
}
static int
@@ -718,6 +723,7 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
	struct drm_v3d_copy_performance_query copy;
	struct v3d_performance_query_info *qinfo = &job->performance_query;
	unsigned int i, j;
+	int err;
	if (!job) {
		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -756,27 +762,27 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
		u32 id;
		if (get_user(sync, syncs++)) {
-			kvfree(qinfo->queries);
-			return -EFAULT;
+			err = -EFAULT;
+			goto error;
		}
-		qinfo->queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
		if (get_user(ids, kperfmon_ids++)) {
-			kvfree(qinfo->queries);
-			return -EFAULT;
+			err = -EFAULT;
+			goto error;
		}
		ids_pointer = u64_to_user_ptr(ids);
		for (j = 0; j < copy.nperfmons; j++) {
			if (get_user(id, ids_pointer++)) {
-				kvfree(qinfo->queries);
-				return -EFAULT;
+				err = -EFAULT;
+				goto error;
			}
			qinfo->queries[i].kperfmon_ids[j] = id;
		}
+
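+		/* As in the reset path: take the reference only once all copies succeed. */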
+		qinfo->queries[i].syncobj = drm_syncobj_find(file_priv, sync);
	}
	qinfo->count = copy.count;
	qinfo->nperfmons = copy.nperfmons;
@@ -789,6 +795,10 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
	job->copy.stride = copy.stride;
	return 0;
+
+error:
+	__v3d_performance_query_info_free(qinfo, i);
+	return err;
}
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data