@@ -9,20 +9,21 @@
#include <linux/ascii85.h>
#include <linux/interconnect.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/nvmem-consumer.h>
#include <soc/qcom/ocmem.h>
+#include <drm/drm_file.h>
#include "adreno_gpu.h"
#include "a6xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
static u64 address_space_size = 0;
MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
module_param(address_space_size, ullong, 0600);
static bool zap_available = true;
@@ -391,47 +392,68 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
return -EINVAL;
break;
default:
if (len != 0)
return -EINVAL;
}
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE: {
- char *str, **paramp;
+ char *str, *str2, **paramp;
+ struct drm_file *file = ctx->file;
str = kmalloc(len + 1, GFP_KERNEL);
if (!str)
return -ENOMEM;
if (copy_from_user(str, u64_to_user_ptr(value), len)) {
kfree(str);
return -EFAULT;
}
/* Ensure string is null terminated: */
str[len] = '\0';
+ /*
+ * We need a 2nd copy for drm_file.. this copy can't replace
+ * our internal copy in the ctx, because we may need it for
+ * recovery/devcoredump after the file is already closed.
+ */
+ str2 = kstrdup(str, GFP_KERNEL);
+ if (!str2) {
+ kfree(str);
+ return -ENOMEM;
+ }
+
mutex_lock(&gpu->lock);
if (param == MSM_PARAM_COMM) {
paramp = &ctx->comm;
} else {
paramp = &ctx->cmdline;
}
kfree(*paramp);
*paramp = str;
mutex_unlock(&gpu->lock);
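+ /* Mirror the same override into the drm_file copy, under its own lock: */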
+ mutex_lock(&file->override_lock);
+
+ if (param == MSM_PARAM_COMM) {
+ paramp = &file->override_comm;
+ } else {
+ paramp = &file->override_cmdline;
+ }
+
+ kfree(*paramp);
+ *paramp = str2;
+
+ mutex_unlock(&file->override_lock);
+
return 0;
}
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return msm_file_private_set_sysprof(ctx, gpu, value);
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
@@ -574,20 +574,21 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
struct msm_file_private *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
INIT_LIST_HEAD(&ctx->submitqueues);
rwlock_init(&ctx->queuelock);
kref_init(&ctx->ref);
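+ /* Link back to the drm_file; cleared again in context_close(): */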
+ ctx->file = file;
msm_submitqueue_init(dev, ctx);
ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
ctx->seqno = atomic_inc_return(&ident);
return 0;
}
@@ -596,20 +597,21 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
return context_init(dev, file);
}
static void context_close(struct msm_file_private *ctx)
{
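+ /*
+ * The ctx can outlive the drm_file (it is reference counted and may
+ * still be held by in-flight submits), so drop the back-pointer
+ * before the file goes away.
+ */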
+ ctx->file = NULL;
msm_submitqueue_close(ctx);
msm_file_private_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
/*
@@ -352,20 +352,30 @@ struct msm_gpu_perfcntr {
* @seqno: unique per process seqno
*/
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
int queueid;
struct msm_gem_address_space *aspace;
struct kref ref;
int seqno;
+ /**
+ * @file: link back to the associated drm_file
+ *
+ * Note that msm_file_private can outlive the drm_file, i.e.
+ * after the drm_file is closed but before previously submitted
+ * jobs have been cleaned up. After the drm_file is closed this
+ * will be NULL.
+ */
+ struct drm_file *file;
+
/**
* sysprof:
*
* The value of MSM_PARAM_SYSPROF set by userspace. This is
* intended to be used by system profiling tools like Mesa's
* pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
*
* Setting a value of 1 will preserve performance counters across
* context switches. Setting a value of 2 will in addition
* suppress suspend. (Performance counters lose state across