--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -334,6 +334,8 @@ struct drm_i915_file_private {
} rps;
struct intel_engine_cs *bsd_ring;
+
+ u32 scheduler_queue_length;
};
enum intel_dpll_id {
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1935,6 +1935,10 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ /* Throttle batch requests per device file */
+ if (i915_scheduler_file_queue_is_full(file))
+ return -EAGAIN;
+
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
@@ -2018,6 +2022,10 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
+ /* Throttle batch requests per device file */
+ if (i915_scheduler_file_queue_is_full(file))
+ return -EAGAIN;
+
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -40,6 +40,8 @@ static void i915_scheduler_priority_bump_clear(struct i915_scheduler *sch
static int i915_scheduler_priority_bump(struct i915_scheduler *scheduler,
struct i915_scheduler_queue_entry *target,
uint32_t bump);
+static void i915_scheduler_file_queue_inc(struct drm_file *file);
+static void i915_scheduler_file_queue_dec(struct drm_file *file);
bool i915_scheduler_is_enabled(struct drm_device *dev)
{
@@ -75,6 +77,7 @@ int i915_scheduler_init(struct drm_device *dev)
scheduler->priority_level_max = ~0U;
scheduler->priority_level_preempt = 900;
scheduler->min_flying = 2;
+ scheduler->file_queue_max = 64;
dev_priv->scheduler = scheduler;
@@ -249,6 +252,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
list_add_tail(&node->link, &scheduler->node_queue[ring->id]);
+ i915_scheduler_file_queue_inc(node->params.file);
+
if (i915.scheduler_override & i915_so_submit_on_queue)
not_flying = true;
else
@@ -630,6 +635,12 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
/* Strip the dependency info while the mutex is still locked */
i915_scheduler_remove_dependent(scheduler, node);
+ /* Likewise clean up the file pointer before it might disappear. */
+ if (node->params.file) {
+ i915_scheduler_file_queue_dec(node->params.file);
+ node->params.file = NULL;
+ }
+
continue;
}
@@ -1330,3 +1341,26 @@ int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
return 0;
}
+
+bool i915_scheduler_file_queue_is_full(struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *dev_priv = file_priv->dev_priv;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+ return file_priv->scheduler_queue_length >= scheduler->file_queue_max;
+}
+
+static void i915_scheduler_file_queue_inc(struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ file_priv->scheduler_queue_length++;
+}
+
+static void i915_scheduler_file_queue_dec(struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ file_priv->scheduler_queue_length--;
+}
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -87,6 +87,7 @@ struct i915_scheduler {
uint32_t priority_level_max;
uint32_t priority_level_preempt;
uint32_t min_flying;
+ uint32_t file_queue_max;
};
/* Flag bits for i915_scheduler::flags */
@@ -120,5 +121,6 @@ int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
bool is_locked);
bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
bool *completed, bool *busy);
+bool i915_scheduler_file_queue_is_full(struct drm_file *file);
#endif /* _I915_SCHEDULER_H_ */
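
Not part of the patch, but as context for the new -EAGAIN path: once a file handle already has file_queue_max batches tracked by the scheduler, further execbuffer ioctls fail with EAGAIN instead of queueing more work. Below is a minimal userspace-side sketch of handling that; the submit_batch() helper is hypothetical, the struct drm_i915_gem_execbuffer2 is assumed to be already populated against an open i915 fd, and the exact include path for i915_drm.h depends on whether the kernel uapi headers or the libdrm copies are used.

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: submit one batch, retrying while the scheduler's
 * per-file queue is full (the kernel's -EAGAIN shows up as errno == EAGAIN). */
static int submit_batch(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	for (;;) {
		int ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);

		if (ret == 0)
			return 0;
		if (errno == EAGAIN) {
			/* Per-file queue full: back off briefly, then retry. */
			usleep(1000);
			continue;
		}
		if (errno == EINTR)
			continue;
		perror("execbuffer2");
		return -errno;
	}
}

For comparison, libdrm's drmIoctl() wrapper already restarts the ioctl on EINTR and EAGAIN, so callers going through it will effectively spin until earlier batches retire and i915_scheduler_file_queue_dec() drops the per-file count back below file_queue_max.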