--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1364,43 +1364,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
-{
- /*
- * A single pass should suffice to release all the freed objects (along
- * most call paths) , but be a little more paranoid in that freeing
- * the objects does take a little amount of time, during which the rcu
- * callbacks could have added new objects into the freed list, and
- * armed the work again.
- */
- while (atomic_read(&i915->mm.free_count)) {
- flush_delayed_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
- rcu_barrier();
- }
-}
-
-static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
-{
- /*
- * Similar to objects above (see i915_gem_drain_freed-objects), in
- * general we have workers that are armed by RCU and then rearm
- * themselves in their callbacks. To be paranoid, we need to
- * drain the workqueue a second time after waiting for the RCU
- * grace period so that we catch work queued via RCU from the first
- * pass. As neither drain_workqueue() nor flush_workqueue() report
- * a result, we make an assumption that we only don't require more
- * than 3 passes to catch all _recursive_ RCU delayed work.
- *
- */
- int pass = 3;
- do {
- flush_workqueue(i915->wq);
- rcu_barrier();
- i915_gem_drain_freed_objects(i915);
- } while (--pass);
- drain_workqueue(i915->wq);
-}
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915);
+void i915_gem_drain_workqueue(struct drm_i915_private *i915);
struct i915_vma * __must_check
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1085,6 +1085,44 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return err;
}
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ /*
+ * A single pass should suffice to release all the freed objects
+ * (along most call paths), but be a little more paranoid in that
+ * freeing the objects does take a small amount of time, during
+ * which the RCU callbacks could have added new objects onto the
+ * freed list, and armed the work again.
+ */
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_delayed_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
+ rcu_barrier();
+ }
+}
+
+void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ /*
+ * Similar to objects above (see i915_gem_drain_freed_objects),
+ * in general we have workers that are armed by RCU and then
+ * rearm themselves in their callbacks. To be paranoid, we need
+ * to drain the workqueue a second time after waiting for the
+ * RCU grace period so that we catch work queued via RCU from
+ * the first pass. As neither drain_workqueue() nor
+ * flush_workqueue() reports a result, we assume that we do not
+ * require more than 3 passes to catch all _recursive_ RCU
+ * delayed work.
+ */
+ int pass = 3;
+ do {
+ flush_workqueue(i915->wq);
+ rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
+ } while (--pass);
+ drain_workqueue(i915->wq);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
As best I can tell these aren't used in the kind of hotpaths that
mandate using static inline. They do make header cleanup harder,
though, so un-inline. While at it, fix some typos in the comments
being moved.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h | 39 ++-------------------------------
 drivers/gpu/drm/i915/i915_gem.c | 38 ++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+), 37 deletions(-)
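
For reference, the failure mode the 3-pass loop defends against is a
worker that re-arms itself from an RCU callback: work queued inside the
callback runs after the grace period ends, so it is invisible to any
flush_workqueue() that completed earlier. Below is a minimal,
hypothetical sketch of that pattern -- every demo_* symbol is invented
for illustration and none of this is part of the patch.

#include <linux/llist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct rcu_head rcu;
	struct llist_node node;
};

static struct workqueue_struct *demo_wq; /* assumed allocated elsewhere */
static LLIST_HEAD(demo_freed);

static void demo_free_fn(struct work_struct *work);
static DECLARE_WORK(demo_free_work, demo_free_fn);

static void demo_rcu_cb(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct demo_obj, rcu));

	/*
	 * More objects may have arrived while we waited for the grace
	 * period; re-arm the worker. A flush_workqueue() that completed
	 * before this point never saw this queueing, hence the repeated
	 * flush_workqueue()/rcu_barrier() passes in the drain above.
	 */
	if (!llist_empty(&demo_freed))
		queue_work(demo_wq, &demo_free_work);
}

static void demo_free_fn(struct work_struct *work)
{
	struct llist_node *list = llist_del_all(&demo_freed);
	struct demo_obj *obj, *tmp;

	/* Defer the actual kfree() until after an RCU grace period. */
	llist_for_each_entry_safe(obj, tmp, list, node)
		call_rcu(&obj->rcu, demo_rcu_cb);
}

static void demo_object_put(struct demo_obj *obj)
{
	/* llist_add() returns true if the list was empty: arm the worker. */
	if (llist_add(&obj->node, &demo_freed))
		queue_work(demo_wq, &demo_free_work);
}

Each flush_workqueue() in the drain loop runs the work queued so far,
and each rcu_barrier() waits out the call_rcu() callbacks that work may
have posted; the code assumes three such passes bound the recursion in
practice before the final drain_workqueue().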