@@ -351,13 +351,9 @@ struct v3d_timestamp_query {
 	struct drm_syncobj *syncobj;
 };
 
-/* Number of perfmons required to handle all supported performance counters */
-#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_MAX_COUNTERS, \
-				      DRM_V3D_MAX_PERF_COUNTERS)
-
 struct v3d_performance_query {
 	/* Performance monitor IDs for this query */
-	u32 kperfmon_ids[V3D_MAX_PERFMONS];
+	u32 *kperfmon_ids;
 
 	/* Syncobj that indicates the query availability */
 	struct drm_syncobj *syncobj;
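
For reference, the query struct as it reads once the hunk above is applied (reconstructed only from the context and added lines of the diff, nothing further assumed). The fixed V3D_MAX_PERFMONS bound disappears because the ID array is now sized at runtime from the caller-supplied nperfmons, as the allocation hunk further below shows:

struct v3d_performance_query {
	/* Performance monitor IDs for this query */
	u32 *kperfmon_ids;

	/* Syncobj that indicates the query availability */
	struct drm_syncobj *syncobj;
};
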
@@ -94,8 +94,10 @@ __v3d_performance_query_info_free(struct v3d_performance_query_info *qinfo,
 	if (qinfo->queries) {
 		unsigned int i;
 
-		for (i = 0; i < count; i++)
+		for (i = 0; i < count; i++) {
 			drm_syncobj_put(qinfo->queries[i].syncobj);
+			kvfree(qinfo->queries[i].kperfmon_ids);
+		}
 
 		kvfree(qinfo->queries);
 	}
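
A sketch of the whole free helper with this hunk applied, assuming the function body holds nothing beyond the lines shown as context (the static void signature is an assumption; the parameter list comes from the hunk header). Each query now owns a kvmalloc'ed kperfmon_ids array, so it is released next to its syncobj reference before the queries array itself:

static void
__v3d_performance_query_info_free(struct v3d_performance_query_info *qinfo,
				  unsigned int count)
{
	if (qinfo->queries) {
		unsigned int i;

		for (i = 0; i < count; i++) {
			/* Drop the per-query syncobj reference and ID array. */
			drm_syncobj_put(qinfo->queries[i].syncobj);
			kvfree(qinfo->queries[i].kperfmon_ids);
		}

		kvfree(qinfo->queries);
	}
}
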
@@ -674,10 +674,20 @@ copy_query_info(struct v3d_performance_query_info *qinfo,
 			goto error;
 		}
 
+		query->kperfmon_ids =
+			kvmalloc_array(nperfmons,
+				       sizeof(*query->kperfmon_ids),
+				       GFP_KERNEL);
+		if (!query->kperfmon_ids) {
+			err = -ENOMEM;
+			goto error;
+		}
+
 		ids_pointer = u64_to_user_ptr(ids);
 
 		for (j = 0; j < nperfmons; j++) {
 			if (get_user(id, ids_pointer++)) {
+				kvfree(query->kperfmon_ids);
 				err = -EFAULT;
 				goto error;
 			}
@@ -687,6 +697,7 @@ copy_query_info(struct v3d_performance_query_info *qinfo,
 
 		query->syncobj = drm_syncobj_find(fpriv, sync);
 		if (!query->syncobj) {
+			kvfree(query->kperfmon_ids);
 			err = -ENOENT;
 			goto error;
 		}
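
The explicit kvfree() calls on the two failure paths above are needed because the shared error unwind only covers fully initialized queries. A minimal sketch of that convention, assuming the error label simply hands the first i entries to the free helper (the unwind code itself is outside this diff):

	/* ... per-query setup loop; i counts fully initialized queries ... */

	return 0;

error:
	/*
	 * Frees the syncobj reference and kperfmon_ids of queries [0, i)
	 * only, which is why a partially built query drops its own
	 * kperfmon_ids before jumping here.
	 */
	__v3d_performance_query_info_free(qinfo, i);
	return err;
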
@@ -721,9 +732,6 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
 	if (copy_from_user(&reset, ext, sizeof(reset)))
 		return -EFAULT;
 
-	if (reset.nperfmons > V3D_MAX_PERFMONS)
-		return -EINVAL;
-
 	job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
 
 	qinfo->queries = kvmalloc_array(reset.count,
@@ -770,9 +778,6 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
 	if (copy.pad)
 		return -EINVAL;
 
-	if (copy.nperfmons > V3D_MAX_PERFMONS)
-		return -EINVAL;
-
 	job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
 
 	qinfo->queries = kvmalloc_array(copy.count,