@@ -342,6 +342,8 @@ struct drm_i915_file_private {
} rps;
struct intel_engine_cs *bsd_ring;
+
+ u32 scheduler_queue_length;
};
enum intel_dpll_id {
@@ -1862,6 +1862,10 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ /* Throttle batch requests per device file */
+ if (i915_scheduler_file_queue_is_full(file))
+ return -EAGAIN;
+
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
@@ -1945,6 +1949,10 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
+ /* Throttle batch requests per device file */
+ if (i915_scheduler_file_queue_is_full(file))
+ return -EAGAIN;
+
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
@@ -40,6 +40,8 @@ static void i915_scheduler_priority_bump_clear(struct i915_scheduler *sch
static int i915_scheduler_priority_bump(struct i915_scheduler *scheduler,
struct i915_scheduler_queue_entry *target,
uint32_t bump);
+static void i915_scheduler_file_queue_inc(struct drm_file *file);
+static void i915_scheduler_file_queue_dec(struct drm_file *file);
bool i915_scheduler_is_enabled(struct drm_device *dev)
{
@@ -74,6 +76,7 @@ int i915_scheduler_init(struct drm_device *dev)
scheduler->priority_level_max = 1023;
scheduler->priority_level_preempt = 900;
scheduler->min_flying = 2;
+ scheduler->file_queue_max = 64;
dev_priv->scheduler = scheduler;
@@ -267,6 +270,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
list_add_tail(&node->link, &scheduler->node_queue[ring->id]);
+ i915_scheduler_file_queue_inc(node->params.file);
+
if (i915.scheduler_override & i915_so_submit_on_queue)
not_flying = true;
else
@@ -551,6 +556,12 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
/* Strip the dependency info while the mutex is still locked */
i915_scheduler_remove_dependent(scheduler, node);
+	/* Likewise drop the reference to the DRM file before it can disappear. */
+ if (node->params.file) {
+ i915_scheduler_file_queue_dec(node->params.file);
+ node->params.file = NULL;
+ }
+
continue;
}
@@ -1194,6 +1205,7 @@ int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
node->status,
ring->name);
+ i915_scheduler_file_queue_dec(node->params.file);
node->params.file = NULL;
}
}
@@ -1202,3 +1214,42 @@ int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
 
 	return 0;
 }
+
+/**
+ * i915_scheduler_file_queue_is_full - test a file's scheduler queue limit
+ * @file: DRM file whose count of queued batch buffers is to be tested
+ *
+ * Returns true when the number of batch buffers this file currently has
+ * queued in the scheduler has reached the per-file limit, in which case
+ * the execbuffer ioctls back off with -EAGAIN.
+ */
+bool i915_scheduler_file_queue_is_full(struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_private *dev_priv = file_priv->dev_priv;
+	struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+	return file_priv->scheduler_queue_length >= scheduler->file_queue_max;
+}
+
+/* Account for one more batch buffer queued on behalf of @file. */
+static void i915_scheduler_file_queue_inc(struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	file_priv->scheduler_queue_length++;
+}
+
+/*
+ * Account for one batch buffer leaving the queue. Warn rather than let the
+ * unsigned counter wrap (which would permanently defeat the -EAGAIN
+ * throttle) if an unbalanced decrement ever slips in.
+ */
+static void i915_scheduler_file_queue_dec(struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	if (WARN_ON_ONCE(file_priv->scheduler_queue_length == 0))
+		return;
+
+	file_priv->scheduler_queue_length--;
+}
@@ -82,6 +82,7 @@ struct i915_scheduler {
int32_t priority_level_max;
int32_t priority_level_preempt;
uint32_t min_flying;
+ uint32_t file_queue_max;
};
/* Flag bits for i915_scheduler::flags */
@@ -110,5 +111,6 @@ int i915_scheduler_flush_stamp(struct intel_engine_cs *ring,
unsigned long stamp, bool is_locked);
bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
bool *completed, bool *busy);
+bool i915_scheduler_file_queue_is_full(struct drm_file *file);
#endif /* _I915_SCHEDULER_H_ */