
[RFC,12/97] drm/i915/guc: Don't repeat CTB layout calculations

Message ID 20210506191451.77768-13-matthew.brost@intel.com (mailing list archive)
State New, archived
Series Basic GuC submission support in the i915

Commit Message

Matthew Brost May 6, 2021, 7:13 p.m. UTC
From: Michal Wajdeczko <michal.wajdeczko@intel.com>

We can retrieve the offsets of the cmds buffers and descriptors from
the actual pointers that we already keep locally.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
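
The core change is to stop re-deriving the CTB addresses from hard-coded
page-layout arithmetic (PAGE_SIZE / 4 * i + PAGE_SIZE / 2) and instead
compute each offset as the byte difference between the locally kept CPU
pointer and the start of the mapped blob. A minimal standalone sketch of
that idea follows; the struct layout, helper names, GGTT base address and
BLOB_SIZE below are illustrative stand-ins, not the driver's actual
definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BLOB_SIZE 4096u   /* stand-in for PAGE_SIZE */

/* Illustrative stand-ins for the driver's descriptor/buffer types. */
struct ct_desc { uint32_t head, tail, status; };

struct ct_buffer {
	struct ct_desc *desc;   /* CPU pointer into the mapped blob */
	uint32_t       *cmds;   /* CPU pointer into the mapped blob */
};

/* Byte difference between two pointers, in the spirit of the driver's
 * ptrdiff() helper. */
static ptrdiff_t byte_offset(const void *ptr, const void *base)
{
	return (const uint8_t *)ptr - (const uint8_t *)base;
}

int main(void)
{
	static uint8_t blob[BLOB_SIZE];          /* the mapped CT blob */
	uint32_t ggtt_base = 0x00100000;         /* GGTT address of the blob */
	struct ct_buffer ctbs[2];

	/* Layout chosen once, at allocation time: descriptors in the first
	 * half of the page, cmds buffers in the second half. */
	ctbs[0].desc = (struct ct_desc *)(blob + 0);
	ctbs[1].desc = (struct ct_desc *)(blob + BLOB_SIZE / 4);
	ctbs[0].cmds = (uint32_t *)(blob + BLOB_SIZE / 2);
	ctbs[1].cmds = (uint32_t *)(blob + BLOB_SIZE / 2 + BLOB_SIZE / 4);

	/* Later, the GGTT addresses are derived from the pointers instead of
	 * repeating the layout constants. */
	for (int i = 0; i < 2; i++) {
		uint32_t desc = ggtt_base + byte_offset(ctbs[i].desc, blob);
		uint32_t cmds = ggtt_base + byte_offset(ctbs[i].cmds, blob);

		printf("%d: desc addr=%#x cmds addr=%#x\n", i, desc, cmds);
	}
	return 0;
}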

Comments

Matthew Brost May 25, 2021, 2:53 a.m. UTC | #1
On Thu, May 06, 2021 at 12:13:26PM -0700, Matthew Brost wrote:
> From: Michal Wajdeczko <michal.wajdeczko@intel.com>
> 
> We can retrieve offsets to cmds buffers and descriptor from
> actual pointers that we already keep locally.
> 
> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 16 ++++++++++------
>  1 file changed, 10 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> index dbece569fbe4..fbd6bd20f588 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> @@ -244,6 +244,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
>  {
>  	struct intel_guc *guc = ct_to_guc(ct);
>  	u32 base, cmds;
> +	void *blob;
>  	int err;
>  	int i;
>  
> @@ -251,15 +252,18 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
>  
>  	/* vma should be already allocated and map'ed */
>  	GEM_BUG_ON(!ct->vma);
> +	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));

This doesn't really have anything to do with this patch, but again this
patch will be squashed into a large patch updating the GuC firmware, so
I think this is fine.

With that:
Reviewed-by: Matthew Brost <matthew.brost@intel.com>

>  	base = intel_guc_ggtt_offset(guc, ct->vma);
>  
> -	/* (re)initialize descriptors
> -	 * cmds buffers are in the second half of the blob page
> -	 */
> +	/* blob should start with send descriptor */
> +	blob = __px_vaddr(ct->vma->obj);
> +	GEM_BUG_ON(blob != ct->ctbs[CTB_SEND].desc);
> +
> +	/* (re)initialize descriptors */
>  	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
>  		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
>  
> -		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
> +		cmds = base + ptrdiff(ct->ctbs[i].cmds, blob);
>  		CT_DEBUG(ct, "%d: cmds addr=%#x\n", i, cmds);
>  
>  		guc_ct_buffer_reset(&ct->ctbs[i], cmds);
> @@ -269,12 +273,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
>  	 * Register both CT buffers starting with RECV buffer.
>  	 * Descriptors are in first half of the blob.
>  	 */
> -	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
> +	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_RECV].desc, blob),
>  				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
>  	if (unlikely(err))
>  		goto err_out;
>  
> -	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
> +	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_SEND].desc, blob),
>  				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
>  	if (unlikely(err))
>  		goto err_deregister;
> -- 
> 2.28.0
>
Michal Wajdeczko May 25, 2021, 1:07 p.m. UTC | #2
On 25.05.2021 04:53, Matthew Brost wrote:
> On Thu, May 06, 2021 at 12:13:26PM -0700, Matthew Brost wrote:
>> From: Michal Wajdeczko <michal.wajdeczko@intel.com>
>>
>> We can retrieve offsets to cmds buffers and descriptor from
>> actual pointers that we already keep locally.
>>
>> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
>> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
>> ---
>>  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 16 ++++++++++------
>>  1 file changed, 10 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>> index dbece569fbe4..fbd6bd20f588 100644
>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
>> @@ -244,6 +244,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
>>  {
>>  	struct intel_guc *guc = ct_to_guc(ct);
>>  	u32 base, cmds;
>> +	void *blob;
>>  	int err;
>>  	int i;
>>  
>> @@ -251,15 +252,18 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
>>  
>>  	/* vma should be already allocated and map'ed */
>>  	GEM_BUG_ON(!ct->vma);
>> +	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
> 
> This doesn't really have anything to do with this patch, but again this
> patch will be squashed into a large patch updating the GuC firmware, so
> I think this is fine.

again, no need to squash GuC patches up to 20/97

> 
> With that:
> Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> 
>>  	base = intel_guc_ggtt_offset(guc, ct->vma);
>>  
>> -	/* (re)initialize descriptors
>> -	 * cmds buffers are in the second half of the blob page
>> -	 */
>> +	/* blob should start with send descriptor */
>> +	blob = __px_vaddr(ct->vma->obj);
>> +	GEM_BUG_ON(blob != ct->ctbs[CTB_SEND].desc);
>> +
>> +	/* (re)initialize descriptors */
>>  	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
>>  		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
>>  
>> -		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
>> +		cmds = base + ptrdiff(ct->ctbs[i].cmds, blob);
>>  		CT_DEBUG(ct, "%d: cmds addr=%#x\n", i, cmds);
>>  
>>  		guc_ct_buffer_reset(&ct->ctbs[i], cmds);
>> @@ -269,12 +273,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
>>  	 * Register both CT buffers starting with RECV buffer.
>>  	 * Descriptors are in first half of the blob.
>>  	 */
>> -	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
>> +	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_RECV].desc, blob),
>>  				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
>>  	if (unlikely(err))
>>  		goto err_out;
>>  
>> -	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
>> +	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_SEND].desc, blob),
>>  				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
>>  	if (unlikely(err))
>>  		goto err_deregister;
>> -- 
>> 2.28.0
>>
Matthew Brost May 25, 2021, 4:56 p.m. UTC | #3
On Tue, May 25, 2021 at 03:07:06PM +0200, Michal Wajdeczko wrote:
> 
> 
> On 25.05.2021 04:53, Matthew Brost wrote:
> > On Thu, May 06, 2021 at 12:13:26PM -0700, Matthew Brost wrote:
> >> From: Michal Wajdeczko <michal.wajdeczko@intel.com>
> >>
> >> We can retrieve offsets to cmds buffers and descriptor from
> >> actual pointers that we already keep locally.
> >>
> >> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
> >> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> >> ---
> >>  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 16 ++++++++++------
> >>  1 file changed, 10 insertions(+), 6 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> >> index dbece569fbe4..fbd6bd20f588 100644
> >> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> >> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
> >> @@ -244,6 +244,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
> >>  {
> >>  	struct intel_guc *guc = ct_to_guc(ct);
> >>  	u32 base, cmds;
> >> +	void *blob;
> >>  	int err;
> >>  	int i;
> >>  
> >> @@ -251,15 +252,18 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
> >>  
> >>  	/* vma should be already allocated and map'ed */
> >>  	GEM_BUG_ON(!ct->vma);
> >> +	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
> > 
> > This doesn't really have anything to do with this patch, but again this
> > patch will be squashed into a large patch updating the GuC firmware, so
> > I think this is fine.
> 
> again, no need to squash GuC patches up to 20/97
> 

Got it. As discussed, I will post patches 4-20 after I am done reviewing
all of them.

Matt 

> > 
> > With that:
> > Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> > 
> >>  	base = intel_guc_ggtt_offset(guc, ct->vma);
> >>  
> >> -	/* (re)initialize descriptors
> >> -	 * cmds buffers are in the second half of the blob page
> >> -	 */
> >> +	/* blob should start with send descriptor */
> >> +	blob = __px_vaddr(ct->vma->obj);
> >> +	GEM_BUG_ON(blob != ct->ctbs[CTB_SEND].desc);
> >> +
> >> +	/* (re)initialize descriptors */
> >>  	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
> >>  		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
> >>  
> >> -		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
> >> +		cmds = base + ptrdiff(ct->ctbs[i].cmds, blob);
> >>  		CT_DEBUG(ct, "%d: cmds addr=%#x\n", i, cmds);
> >>  
> >>  		guc_ct_buffer_reset(&ct->ctbs[i], cmds);
> >> @@ -269,12 +273,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
> >>  	 * Register both CT buffers starting with RECV buffer.
> >>  	 * Descriptors are in first half of the blob.
> >>  	 */
> >> -	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
> >> +	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_RECV].desc, blob),
> >>  				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
> >>  	if (unlikely(err))
> >>  		goto err_out;
> >>  
> >> -	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
> >> +	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_SEND].desc, blob),
> >>  				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
> >>  	if (unlikely(err))
> >>  		goto err_deregister;
> >> -- 
> >> 2.28.0
> >>

Patch

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index dbece569fbe4..fbd6bd20f588 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -244,6 +244,7 @@  int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
 	u32 base, cmds;
+	void *blob;
 	int err;
 	int i;
 
@@ -251,15 +252,18 @@  int intel_guc_ct_enable(struct intel_guc_ct *ct)
 
 	/* vma should be already allocated and map'ed */
 	GEM_BUG_ON(!ct->vma);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
 	base = intel_guc_ggtt_offset(guc, ct->vma);
 
-	/* (re)initialize descriptors
-	 * cmds buffers are in the second half of the blob page
-	 */
+	/* blob should start with send descriptor */
+	blob = __px_vaddr(ct->vma->obj);
+	GEM_BUG_ON(blob != ct->ctbs[CTB_SEND].desc);
+
+	/* (re)initialize descriptors */
 	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
 		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
 
-		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
+		cmds = base + ptrdiff(ct->ctbs[i].cmds, blob);
 		CT_DEBUG(ct, "%d: cmds addr=%#x\n", i, cmds);
 
 		guc_ct_buffer_reset(&ct->ctbs[i], cmds);
@@ -269,12 +273,12 @@  int intel_guc_ct_enable(struct intel_guc_ct *ct)
 	 * Register both CT buffers starting with RECV buffer.
 	 * Descriptors are in first half of the blob.
 	 */
-	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
+	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_RECV].desc, blob),
 				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
 	if (unlikely(err))
 		goto err_out;
 
-	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
+	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs[CTB_SEND].desc, blob),
 				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
 	if (unlikely(err))
 		goto err_deregister;
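
For readers checking the diff, a quick worked evaluation of the layout
arithmetic being removed (assuming PAGE_SIZE = 4096 and the CTB_SEND = 0 /
CTB_RECV = 1 indexing used above): the send descriptor sits at offset 0,
the recv descriptor at 1024, the send cmds buffer at 2048 and the recv cmds
buffer at 3072. The ptrdiff-based code must reproduce exactly these offsets
as long as the allocation path keeps the same layout. A minimal check:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define CTB_SEND  0u
#define CTB_RECV  1u

int main(void)
{
	/* Offsets produced by the removed arithmetic ... */
	uint32_t desc_off[2], cmds_off[2];

	for (uint32_t i = CTB_SEND; i <= CTB_RECV; i++) {
		desc_off[i] = PAGE_SIZE / 4 * i;                 /* 0, 1024 */
		cmds_off[i] = PAGE_SIZE / 4 * i + PAGE_SIZE / 2; /* 2048, 3072 */
	}

	/* ... which the pointer-difference form must match as long as the
	 * allocation path lays out the blob the same way. */
	assert(desc_off[CTB_SEND] == 0 && desc_off[CTB_RECV] == 1024);
	assert(cmds_off[CTB_SEND] == 2048 && cmds_off[CTB_RECV] == 3072);

	printf("send: desc=%u cmds=%u\nrecv: desc=%u cmds=%u\n",
	       desc_off[CTB_SEND], cmds_off[CTB_SEND],
	       desc_off[CTB_RECV], cmds_off[CTB_RECV]);
	return 0;
}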