@@ -46,16 +46,21 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
+unsigned int drm_async_gpu = 0; /* 1 to enable async gpu wait */
+EXPORT_SYMBOL(drm_async_gpu);
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(async_gpu, "Enable asynchronous GPU wait for page flips (0=off [default], 1=on)");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(async_gpu, drm_async_gpu, int, 0600);
struct idr drm_minors_idr;
@@ -801,11 +801,13 @@ static void unpin_fbs(struct drm_device *dev,
}
}
+extern unsigned int drm_async_gpu;
+
static int pin_fbs(struct drm_device *dev,
struct intel_atomic_state *s)
{
int i, ret;
- bool nonblock = s->flags & DRM_MODE_ATOMIC_NONBLOCK;
+ bool nonblock = drm_async_gpu && (s->flags & DRM_MODE_ATOMIC_NONBLOCK);
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct intel_crtc_state *st = &s->crtc[i];
@@ -2359,6 +2361,12 @@ static void atomic_pipe_commit(struct drm_device *dev,
if (list_empty(&flips))
return;
+ if (!drm_async_gpu) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ intel_atomic_schedule_flips(dev_priv, intel_crtc, &flips);
+ return;
+ }
+
spin_lock_irqsave(&dev_priv->flip.lock, flags);
list_for_each_entry_safe(intel_flip, next, &flips, base.list)
intel_atomic_postpone_flip(dev, intel_flip);