@@ -324,6 +324,9 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
+void intel_engine_init_request_pool(struct intel_engine_cs *engine);
+void intel_engine_fini_request_pool(struct intel_engine_cs *engine);
+
static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
@@ -602,6 +602,30 @@ static int init_status_page(struct intel_engine_cs *engine)
return ret;
}
+static void *__mempool_alloc_slab(gfp_t gfp, void *data)
+{
+ return kmem_cache_alloc(data, gfp);
+}
+
+static void __mempool_free_slab(void *element, void *data)
+{
+ kmem_cache_free(data, element);
+}
+
+void intel_engine_init_request_pool(struct intel_engine_cs *engine)
+{
+	/* NB: cannot use mempool_init_slab() here, as it does not handle TYPESAFE_BY_RCU */
+ mempool_init(&engine->request_pool,
+ INTEL_ENGINE_REQUEST_POOL_RESERVED,
+ __mempool_alloc_slab, __mempool_free_slab,
+ i915_request_slab_cache());
+}
+
+void intel_engine_fini_request_pool(struct intel_engine_cs *engine)
+{
+ mempool_exit(&engine->request_pool);
+}
+
static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
@@ -617,6 +641,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_execlists(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
+ intel_engine_init_request_pool(engine);
intel_engine_init_retire(engine);
intel_engine_pool_init(&engine->pool);
@@ -817,6 +842,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
cleanup_status_page(engine);
intel_engine_fini_retire(engine);
+ intel_engine_fini_request_pool(engine);
intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
@@ -13,6 +13,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/mempool.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
@@ -308,6 +309,9 @@ struct intel_engine_cs {
struct list_head hold; /* ready requests, but on hold */
} active;
+	mempool_t request_pool; /* keep some in reserve for power management */
+#define INTEL_ENGINE_REQUEST_POOL_RESERVED 2
+
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
@@ -4892,6 +4892,8 @@ static void virtual_context_destroy(struct kref *kref)
__execlists_context_fini(&ve->context);
intel_context_fini(&ve->context);
+ intel_engine_fini_request_pool(&ve->base);
+
kfree(ve->bonds);
kfree(ve);
}
@@ -5203,6 +5205,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
+ intel_engine_init_request_pool(&ve->base);
ve->base.cops = &virtual_context_ops;
ve->base.request_alloc = execlists_request_alloc;
@@ -266,6 +266,7 @@ static void mock_engine_release(struct intel_engine_cs *engine)
intel_context_put(engine->kernel_context);
intel_engine_fini_retire(engine);
+ intel_engine_fini_request_pool(engine);
intel_engine_fini_breadcrumbs(engine);
}
@@ -327,6 +328,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
+ intel_engine_init_request_pool(engine);
intel_engine_init_retire(engine);
intel_engine_pool_init(&engine->pool);
@@ -54,6 +54,11 @@ static struct i915_global_request {
struct kmem_cache *slab_execute_cbs;
} global;
+struct kmem_cache *i915_request_slab_cache(void)
+{
+ return global.slab_requests;
+}
+
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
return dev_name(to_request(fence)->i915->drm.dev);
@@ -115,7 +120,7 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->submit);
i915_sw_fence_fini(&rq->semaphore);
- kmem_cache_free(global.slab_requests, rq);
+ mempool_free(rq, &rq->engine->request_pool);
}
const struct dma_fence_ops i915_fence_ops = {
@@ -629,14 +634,15 @@ static void retire_requests(struct intel_timeline *tl)
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl, mempool_t *pool, gfp_t gfp)
{
struct i915_request *rq;
- if (list_empty(&tl->requests))
- goto out;
-
+ /* If we cannot wait, dip into our reserves */
if (!gfpflags_allow_blocking(gfp))
+ return mempool_alloc(pool, gfp);
+
+ if (list_empty(&tl->requests))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
@@ -721,7 +727,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq = kmem_cache_alloc(global.slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(tl, gfp);
+ rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
@@ -807,7 +813,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
err_free:
- kmem_cache_free(global.slab_requests, rq);
+ mempool_free(rq, &ce->engine->request_pool);
err_unreserve:
intel_context_unpin(ce);
return ERR_PTR(ret);
@@ -300,6 +300,8 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
return fence->ops == &i915_fence_ops;
}
+struct kmem_cache *i915_request_slab_cache(void);
+
struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check