
[RFC,16/44] drm/i915: Alloc early seqno

Message ID: 1403803475-16337-17-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State: New, archived

Commit Message

John Harrison June 26, 2014, 5:24 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The scheduler needs to explicitly allocate a seqno to track each submitted batch
buffer. This must happen a long time before any commands are actually written to
the ring.
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |    5 +++++
 drivers/gpu/drm/i915/intel_ringbuffer.c    |    2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h    |    1 +
 3 files changed, 7 insertions(+), 1 deletion(-)

Comments

Jesse Barnes July 2, 2014, 6:29 p.m. UTC | #1
On Thu, 26 Jun 2014 18:24:07 +0100
John.C.Harrison@Intel.com wrote:

> From: John Harrison <John.C.Harrison@Intel.com>
> 
> The scheduler needs to explicitly allocate a seqno to track each submitted batch
> buffer. This must happen a long time before any commands are actually written to
> the ring.
> ---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |    5 +++++
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |    2 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |    1 +
>  3 files changed, 7 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index ee836a6..ec274ef 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1317,6 +1317,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
>  	}
>  
> +	/* Allocate a seqno for this batch buffer nice and early. */
> +	ret = intel_ring_alloc_seqno(ring);
> +	if (ret)
> +		goto err;
> +
>  	if (flags & I915_DISPATCH_SECURE)
>  		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
>  	else
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 34d6d6e..737c41b 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1662,7 +1662,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
>  	return i915_wait_seqno(ring, seqno);
>  }
>  
> -static int
> +int
>  intel_ring_alloc_seqno(struct intel_engine_cs *ring)
>  {
>  	if (ring->outstanding_lazy_seqno)
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 30841ea..cc92de2 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -347,6 +347,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
>  
>  int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
>  int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
> +int __must_check intel_ring_alloc_seqno(struct intel_engine_cs *ring);
>  static inline void intel_ring_emit(struct intel_engine_cs *ring,
>  				   u32 data)
>  {

This ought to be ok even w/o the scheduler, we'll just pick up the
lazy_seqno later on rather than allocating a new one at ring_begin
right?

Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
John Harrison July 23, 2014, 3:11 p.m. UTC | #2
On 02/07/2014 19:29, Jesse Barnes wrote:
> On Thu, 26 Jun 2014 18:24:07 +0100
> John.C.Harrison@Intel.com wrote:
>
>> From: John Harrison <John.C.Harrison@Intel.com>
>>
>> The scheduler needs to explicitly allocate a seqno to track each submitted batch
>> buffer. This must happen a long time before any commands are actually written to
>> the ring.
>> ---
>>   drivers/gpu/drm/i915/i915_gem_execbuffer.c |    5 +++++
>>   drivers/gpu/drm/i915/intel_ringbuffer.c    |    2 +-
>>   drivers/gpu/drm/i915/intel_ringbuffer.h    |    1 +
>>   3 files changed, 7 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> index ee836a6..ec274ef 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> @@ -1317,6 +1317,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>>   		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
>>   	}
>>   
>> +	/* Allocate a seqno for this batch buffer nice and early. */
>> +	ret = intel_ring_alloc_seqno(ring);
>> +	if (ret)
>> +		goto err;
>> +
>>   	if (flags & I915_DISPATCH_SECURE)
>>   		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
>>   	else
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> index 34d6d6e..737c41b 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> @@ -1662,7 +1662,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
>>   	return i915_wait_seqno(ring, seqno);
>>   }
>>   
>> -static int
>> +int
>>   intel_ring_alloc_seqno(struct intel_engine_cs *ring)
>>   {
>>   	if (ring->outstanding_lazy_seqno)
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> index 30841ea..cc92de2 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> @@ -347,6 +347,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
>>   
>>   int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
>>   int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
>> +int __must_check intel_ring_alloc_seqno(struct intel_engine_cs *ring);
>>   static inline void intel_ring_emit(struct intel_engine_cs *ring,
>>   				   u32 data)
>>   {
> This ought to be ok even w/o the scheduler, we'll just pick up the
> lazy_seqno later on rather than allocating a new one at ring_begin
> right?
>
> Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
>

Yes. The early allocation is completely benign.
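
To illustrate why the early call is benign: the check on ring->outstanding_lazy_seqno at the end of the second hunk acts as an early-out, so the later call made on behalf of intel_ring_begin() simply sees the seqno that execbuffer already reserved. The sketch below is not part of this patch; the exact intel_ring_begin() body and the __intel_ring_prepare() step are assumptions about the driver of this era, shown only to place the second alloc_seqno call.

/*
 * Illustration only, not part of the patch: a simplified
 * intel_ring_begin() of this era.  __intel_ring_prepare() and the
 * surrounding details are assumptions; what matters is the second
 * intel_ring_alloc_seqno() call.
 */
int intel_ring_begin(struct intel_engine_cs *ring, int num_dwords)
{
	int ret;

	/* Make sure there is enough ring space (details omitted). */
	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/*
	 * Reserve the lazy seqno before touching the ring.  When the
	 * execbuffer hunk above (or the scheduler) has already called
	 * intel_ring_alloc_seqno(), outstanding_lazy_seqno is non-zero
	 * and this call returns without doing anything.
	 */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ring->space -= num_dwords * sizeof(uint32_t);
	return 0;
}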

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee836a6..ec274ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1317,6 +1317,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
 	}
 
+	/* Allocate a seqno for this batch buffer nice and early. */
+	ret = intel_ring_alloc_seqno(ring);
+	if (ret)
+		goto err;
+
 	if (flags & I915_DISPATCH_SECURE)
 		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
 	else
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 34d6d6e..737c41b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1662,7 +1662,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 	return i915_wait_seqno(ring, seqno);
 }
 
-static int
+int
 intel_ring_alloc_seqno(struct intel_engine_cs *ring)
 {
 	if (ring->outstanding_lazy_seqno)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 30841ea..cc92de2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -347,6 +347,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
 int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
 int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_alloc_seqno(struct intel_engine_cs *ring);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
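
For reference, the second hunk only shows the first check inside intel_ring_alloc_seqno(); the patch changes nothing but its linkage (static to exported). The sketch below shows the lazy-allocation pattern the function implements; everything past the visible if (ring->outstanding_lazy_seqno) line, including the preallocated_lazy_request field and the i915_gem_get_seqno() call, is an assumption about the surrounding driver code of this era, not something this patch touches.

/*
 * Sketch only: the likely shape of intel_ring_alloc_seqno().  The
 * request preallocation and i915_gem_get_seqno() call shown here are
 * assumptions about the surrounding code, not part of the patch.
 */
int
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	/*
	 * A seqno is already reserved for the next submission: nothing to
	 * do.  This early return is what makes the extra execbuffer call
	 * benign.
	 */
	if (ring->outstanding_lazy_seqno)
		return 0;

	/*
	 * Reserve the request structure now so that completing the
	 * submission later cannot fail with -ENOMEM.
	 */
	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	/* Pull the next seqno off the device-global counter. */
	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}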