diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -81,6 +81,7 @@ struct tegra_drm_client_ops {
 	int (*submit)(struct tegra_drm_context *context,
 		      struct drm_tegra_submit *args, struct drm_device *drm,
 		      struct drm_file *file);
+	int (*get_streamid_offset)(struct tegra_drm_client *client);
 };
 
 int tegra_drm_submit(struct tegra_drm_context *context,
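
The new op lets an engine report where its stream ID register sits in its register space; a negative return value means the engine cannot isolate stream IDs, and both callers below treat that as "no context". A minimal sketch of an implementation follows; it is not part of this patch, and the FOO_THI_STREAMID0 name and 0x30 offset are placeholders only:

/*
 * Illustrative only: a Falcon-based engine would return the byte offset
 * of its stream ID register. The name and offset below are made up.
 */
#define FOO_THI_STREAMID0 0x30

static int foo_get_streamid_offset(struct tegra_drm_client *client)
{
	return FOO_THI_STREAMID0;
}

An engine that leaves the op NULL (or returns a negative value) keeps mapping buffers against its own device, exactly as before this patch.
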
diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h
--- a/drivers/gpu/drm/tegra/uapi.h
+++ b/drivers/gpu/drm/tegra/uapi.h
@@ -26,6 +26,7 @@ struct tegra_drm_file {
 struct tegra_drm_channel_ctx {
 	struct tegra_drm_client *client;
 	struct host1x_channel *channel;
+	struct host1x_context *context;
 	struct xarray mappings;
 };
 
diff --git a/drivers/gpu/drm/tegra/uapi/submit.c b/drivers/gpu/drm/tegra/uapi/submit.c
--- a/drivers/gpu/drm/tegra/uapi/submit.c
+++ b/drivers/gpu/drm/tegra/uapi/submit.c
@@ -337,6 +337,9 @@ static void release_job(struct host1x_job *job)
 	struct tegra_drm_submit_data *job_data = job->user_data;
 	u32 i;
 
+	if (job->context)
+		host1x_context_put(job->context);
+
 	for (i = 0; i < job_data->num_used_mappings; i++)
 		tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
 
@@ -398,6 +401,16 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
 	job->release = release_job;
 	job->timeout = 10000;
 
+	if (ctx->context && ctx->client->ops->get_streamid_offset) {
+		int offset = ctx->client->ops->get_streamid_offset(ctx->client);
+
+		if (offset >= 0) {
+			job->context = ctx->context;
+			job->engine_streamid_offset = offset;
+			host1x_context_get(job->context);
+		}
+	}
+
 	/*
 	 * job_data is now part of job reference counting, so don't release
 	 * it from here.
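
Nothing in this file consumes job->context or job->engine_streamid_offset; that happens on the host1x side (a separate patch in this series), which is expected to write the stream ID of the context's device into the engine register at that offset before the job's gathers run, so the engine's DMA is translated through the per-context IOMMU domain. The snippet below is only a rough sketch of that idea; the push_streamid() helper and the 0xffff masking are assumptions, not code from the series:

/*
 * Rough sketch only: program the context's stream ID into the engine
 * register named by job->engine_streamid_offset (a byte offset) before
 * the job executes.
 */
static void push_streamid(struct host1x_cdma *cdma, struct host1x_job *job)
{
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(&job->context->dev);
	u32 stream_id = spec->ids[0] & 0xffff;

	host1x_cdma_push(cdma,
			 host1x_opcode_setclass(job->class,
						job->engine_streamid_offset / 4, 1),
			 stream_id);
}

Note also the refcounting pattern: each submitted job takes its own reference on the channel context's host1x_context, and release_job() above drops it, so the context stays alive until the last job referencing it has completed.
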
diff --git a/drivers/gpu/drm/tegra/uapi/uapi.c b/drivers/gpu/drm/tegra/uapi/uapi.c
--- a/drivers/gpu/drm/tegra/uapi/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi/uapi.c
@@ -49,6 +49,9 @@ static void tegra_drm_channel_ctx_close(struct tegra_drm_channel_ctx *ctx)
 	unsigned long mapping_id;
 	struct tegra_drm_mapping *mapping;
 
+	if (ctx->context)
+		host1x_context_put(ctx->context);
+
 	xa_for_each(&ctx->mappings, mapping_id, mapping)
 		tegra_drm_mapping_put(mapping);
 
@@ -82,6 +85,7 @@ void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
 int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data,
 				 struct drm_file *file)
 {
+	struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
 	struct tegra_drm_file *fpriv = file->driver_priv;
 	struct tegra_drm *tegra = drm->dev_private;
 	struct drm_tegra_channel_open *args = data;
@@ -116,10 +120,29 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data,
 		}
 	}
 
+	/* Only allocate context if the engine supports context isolation. */
+	if (client->ops->get_streamid_offset &&
+	    client->ops->get_streamid_offset(client) >= 0) {
+		ctx->context =
+			host1x_context_alloc(host, get_task_pid(current, PIDTYPE_TGID));
+		if (IS_ERR(ctx->context)) {
+			if (PTR_ERR(ctx->context) != -EOPNOTSUPP) {
+				err = PTR_ERR(ctx->context);
+				goto put_channel;
+			} else {
+				/*
+				 * OK, HW does not support contexts or contexts
+				 * are disabled.
+				 */
+				ctx->context = NULL;
+			}
+		}
+	}
+
 	err = xa_alloc(&fpriv->contexts, &args->channel_ctx, ctx,
 		       XA_LIMIT(1, U32_MAX), GFP_KERNEL);
 	if (err < 0)
-		goto put_channel;
+		goto put_context;
 
 	ctx->client = client;
 	xa_init_flags(&ctx->mappings, XA_FLAGS_ALLOC1);
@@ -128,6 +151,9 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data,
 
 	return 0;
 
+put_context:
+	if (ctx->context)
+		host1x_context_put(ctx->context);
 put_channel:
 	host1x_channel_put(ctx->channel);
 free_ctx:
@@ -188,7 +214,11 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data,
 		goto unlock;
 	}
 
-	mapping->dev = ctx->client->base.dev;
+	if (ctx->context)
+		mapping->dev = &ctx->context->dev;
+	else
+		mapping->dev = ctx->client->base.dev;
+
 	mapping->bo = &container_of(gem, struct tegra_bo, gem)->base;
 
 	if (!iommu_get_domain_for_dev(mapping->dev) ||
For engines that support context isolation, allocate a context when
opening a channel, and set up stream ID offset and context fields when
submitting a job.

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.h         |  1 +
 drivers/gpu/drm/tegra/uapi.h        |  1 +
 drivers/gpu/drm/tegra/uapi/submit.c | 13 +++++++++++
 drivers/gpu/drm/tegra/uapi/uapi.c   | 34 +++++++++++++++++++++++++++--
 4 files changed, 47 insertions(+), 2 deletions(-)
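
The host1x_context_alloc()/_get()/_put() calls and the struct device embedded in each context come from the host1x half of this series, which is not shown here. Going only by how the API is used in this patch, its surface looks roughly like the sketch below; the field list and exact prototypes are assumptions, not the actual definitions:

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/pid.h>

struct host1x;

/* Assumed shape of the host1x context objects used by this patch. */
struct host1x_context {
	struct device dev;	/* per-context device, attached to its own IOMMU domain */
	struct kref ref;	/* assumed: dropped by host1x_context_put() */
	struct pid *pid;	/* assumed: owner, from get_task_pid(current, PIDTYPE_TGID) */
};

struct host1x_context *host1x_context_alloc(struct host1x *host1x, struct pid *pid);
void host1x_context_get(struct host1x_context *context);
void host1x_context_put(struct host1x_context *context);

host1x_context_alloc() returns an ERR_PTR() on failure, which is why the channel-open path checks IS_ERR() and treats -EOPNOTSUPP (hardware without context support, or contexts disabled) as "continue without isolation" by clearing ctx->context.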