diff mbox series

[3/4] drm/i915/uc: Place uC firmware in upper range of GGTT

Message ID 20190409213102.30124-4-fernando.pacheco@intel.com (mailing list archive)
State New, archived
Headers show
Series Perma-pin uC firmware and re-enable global reset | expand

Commit Message

Fernando Pacheco April 9, 2019, 9:31 p.m. UTC
Currently we pin the GuC or HuC firmware image just
before uploading. Perma-pin during uC initialization
instead and use the range reserved at the top of the
address space.

Moving the firmware resulted in needing to:
- restore the ggtt mapping during the suspend/resume path.
- use an additional pinning for the rsa signature which will
  be used during HuC auth as addresses above GUC_GGTT_TOP
  do not map through GTT.

Signed-off-by: Fernando Pacheco <fernando.pacheco@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c     |  2 +
 drivers/gpu/drm/i915/intel_guc.c    |  9 ++-
 drivers/gpu/drm/i915/intel_guc_fw.c | 43 ++++++++++---
 drivers/gpu/drm/i915/intel_guc_fw.h |  3 +
 drivers/gpu/drm/i915/intel_huc.c    | 72 +++++++++++++++++-----
 drivers/gpu/drm/i915/intel_huc.h    |  4 ++
 drivers/gpu/drm/i915/intel_huc_fw.c | 80 ++++++++++++++++++++----
 drivers/gpu/drm/i915/intel_huc_fw.h |  3 +
 drivers/gpu/drm/i915/intel_uc.c     | 39 ++++++++++--
 drivers/gpu/drm/i915/intel_uc.h     |  1 +
 drivers/gpu/drm/i915/intel_uc_fw.c  | 96 ++++++++++++++++++++---------
 drivers/gpu/drm/i915/intel_uc_fw.h  | 12 +++-
 12 files changed, 291 insertions(+), 73 deletions(-)

Comments

Chris Wilson April 9, 2019, 9:53 p.m. UTC | #1
Quoting Fernando Pacheco (2019-04-09 22:31:01)
> Currently we pin the GuC or HuC firmware image just
> before uploading. Perma-pin during uC initialization
> instead and use the range reserved at the top of the
> address space.
> 
> Moving the firmware resulted in needing to:
> - restore the ggtt mapping during the suspend/resume path.
> - use an additional pinning for the rsa signature which will
>   be used during HuC auth as addresses above GUC_GGTT_TOP
>   do not map through GTT.
> 
> Signed-off-by: Fernando Pacheco <fernando.pacheco@intel.com>
> ---
> @@ -315,3 +287,67 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
>         drm_printf(p, "\tRSA: offset %u, size %u\n",
>                    uc_fw->rsa_offset, uc_fw->rsa_size);
>  }
> +
> +void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
> +                          struct i915_ggtt *ggtt, u64 start)
> +{
> +       struct drm_i915_gem_object *obj = uc_fw->obj;
> +       struct i915_vma dummy = {
> +               .node = { .start = start, .size = obj->base.size },
> +               .size = obj->base.size,
> +               .pages = obj->mm.pages,
> +               .obj = obj,

Shouldn't need .size or .obj, but usually needs .vm.

> +       };
> +
> +       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
> +       ggtt->vm.insert_entries(&ggtt->vm, &dummy, obj->cache_level, 0);
> +}
> +
> +int intel_uc_fw_ggtt_pin(struct intel_uc_fw *uc_fw,
> +                        struct i915_ggtt *ggtt, u64 start)
> +{
> +       struct drm_i915_gem_object *obj = uc_fw->obj;
> +       int err;
> +
> +       err = i915_gem_object_set_to_gtt_domain(obj, false);

Currently requires struct_mutex, and is not required as we can ensure
the pages are flushed on creation.

> +       if (err) {
> +               DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
> +                                intel_uc_fw_type_repr(uc_fw->type), err);
> +               return err;
> +       }
> +
> +       err = i915_gem_object_pin_pages(obj);

I'm pretty sure we don't need to pin the pages here, as the caller
should be holding the pages already for the duration of the bind.

So I think this should just reduce to the ggtt bind.

> +       if (err) {
> +               DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
> +                                intel_uc_fw_type_repr(uc_fw->type), err);
> +               return err;
> +       }
> +
> +       intel_uc_fw_ggtt_bind(uc_fw, ggtt, start);
> +
> +       return 0;
> +}
> +
> +void intel_uc_fw_ggtt_unpin(struct intel_uc_fw *uc_fw,
> +                           struct i915_ggtt *ggtt, u64 start)
> +{
> +       struct drm_i915_gem_object *obj = uc_fw->obj;
> +       u64 length = obj->base.size;
> +
> +       ggtt->vm.clear_range(&ggtt->vm, start, length);
> +
> +       if (i915_gem_object_has_pinned_pages(obj))
> +               i915_gem_object_unpin_pages(obj);

No. You either own the pages, or you do not. Don't guess.
-Chris
Chris Wilson April 9, 2019, 10:11 p.m. UTC | #2
Quoting Fernando Pacheco (2019-04-09 22:31:01)
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index bf3d12f94365..160959785589 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4508,6 +4508,8 @@ void i915_gem_resume(struct drm_i915_private *i915)
>         i915_gem_restore_gtt_mappings(i915);
>         i915_gem_restore_fences(i915);
>  
> +       intel_uc_restore_ggtt_mapping(i915);

No need, right? The fw ggtt binding is only temporary for the dma xfer.
-Chris
Chris Wilson April 9, 2019, 10:22 p.m. UTC | #3
Quoting Fernando Pacheco (2019-04-09 22:31:01)
> diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
> index 94c04f16a2ad..89e0b942ae86 100644
> --- a/drivers/gpu/drm/i915/intel_huc.c
> +++ b/drivers/gpu/drm/i915/intel_huc.c
> @@ -40,6 +40,59 @@ int intel_huc_init_misc(struct intel_huc *huc)
>         return 0;
>  }
>  
> +/*
> + * The HuC firmware image now sits above GUC_GGTT_TOP and this
> + * portion does not map through GTT. This means GuC cannot
> + * perform the HuC auth with the rsa signature sitting in that
> + * range. We resort to additionally perma-pinning the rsa signature
> + * below GUC_GGTT_TOP and utilizing this mapping to perform
> + * the authentication.
> + */
> +static int intel_huc_rsa_data_create(struct intel_huc *huc)
> +{
> +       struct drm_i915_private *i915 = huc_to_i915(huc);
> +       struct intel_guc *guc = &i915->guc;
> +       struct i915_vma *vma;
> +       void *vaddr;
> +
> +       vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
> +       if (IS_ERR(vma))
> +               return PTR_ERR(vma);
> +

Are we not allocating an object for the dma xfer here that is then bound
to the reserved ggtt node? Why pin it again into the ggtt?
-Chris
Fernando Pacheco April 9, 2019, 10:53 p.m. UTC | #4
On 4/9/19 2:53 PM, Chris Wilson wrote:
> Quoting Fernando Pacheco (2019-04-09 22:31:01)
>> Currently we pin the GuC or HuC firmware image just
>> before uploading. Perma-pin during uC initialization
>> instead and use the range reserved at the top of the
>> address space.
>>
>> Moving the firmware resulted in needing to:
>> - restore the ggtt mapping during the suspend/resume path.
>> - use an additional pinning for the rsa signature which will
>>   be used during HuC auth as addresses above GUC_GGTT_TOP
>>   do not map through GTT.
>>
>> Signed-off-by: Fernando Pacheco <fernando.pacheco@intel.com>
>> ---
>> @@ -315,3 +287,67 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
>>         drm_printf(p, "\tRSA: offset %u, size %u\n",
>>                    uc_fw->rsa_offset, uc_fw->rsa_size);
>>  }
>> +
>> +void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
>> +                          struct i915_ggtt *ggtt, u64 start)
>> +{
>> +       struct drm_i915_gem_object *obj = uc_fw->obj;
>> +       struct i915_vma dummy = {
>> +               .node = { .start = start, .size = obj->base.size },
>> +               .size = obj->base.size,
>> +               .pages = obj->mm.pages,
>> +               .obj = obj,
> Shouldn't need .size or .obj, but usually needs .vm.
>
>> +       };
>> +
>> +       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
>> +       ggtt->vm.insert_entries(&ggtt->vm, &dummy, obj->cache_level, 0);
>> +}
>> +
>> +int intel_uc_fw_ggtt_pin(struct intel_uc_fw *uc_fw,
>> +                        struct i915_ggtt *ggtt, u64 start)
>> +{
>> +       struct drm_i915_gem_object *obj = uc_fw->obj;
>> +       int err;
>> +
>> +       err = i915_gem_object_set_to_gtt_domain(obj, false);
> Currently requires struct_mutex, and is not required as we can ensure
> the pages are flushed on creation.

My intent was to maintain what was being done before
but just doing it earlier.

But if it's not required..

>> +       if (err) {
>> +               DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
>> +                                intel_uc_fw_type_repr(uc_fw->type), err);
>> +               return err;
>> +       }
>> +
>> +       err = i915_gem_object_pin_pages(obj);
> I'm pretty sure we don't need to pin the pages here, as the caller
> should be holding the pages already for the duration of the bind.
>
> So I think this should just reduce to the ggtt bind.

I might be misunderstanding, so could you please clarify
what you mean by "should be holding"? Are you stating
that the caller already holds the pages?

>> +       if (err) {
>> +               DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
>> +                                intel_uc_fw_type_repr(uc_fw->type), err);
>> +               return err;
>> +       }
>> +
>> +       intel_uc_fw_ggtt_bind(uc_fw, ggtt, start);
>> +
>> +       return 0;
>> +}
>> +
>> +void intel_uc_fw_ggtt_unpin(struct intel_uc_fw *uc_fw,
>> +                           struct i915_ggtt *ggtt, u64 start)
>> +{
>> +       struct drm_i915_gem_object *obj = uc_fw->obj;
>> +       u64 length = obj->base.size;
>> +
>> +       ggtt->vm.clear_range(&ggtt->vm, start, length);
>> +
>> +       if (i915_gem_object_has_pinned_pages(obj))
>> +               i915_gem_object_unpin_pages(obj);
> No. You either own the pages, or you do not. Don't guess.
> -Chris

Yeah my bad.

Thanks,
Fernando
Chris Wilson April 9, 2019, 11:04 p.m. UTC | #5
Quoting Fernando Pacheco (2019-04-09 23:53:08)
> 
> On 4/9/19 2:53 PM, Chris Wilson wrote:
> > Quoting Fernando Pacheco (2019-04-09 22:31:01)
> >> +int intel_uc_fw_ggtt_pin(struct intel_uc_fw *uc_fw,
> >> +                        struct i915_ggtt *ggtt, u64 start)
> >> +{
> >> +       struct drm_i915_gem_object *obj = uc_fw->obj;
> >> +       int err;
> >> +
> >> +       err = i915_gem_object_set_to_gtt_domain(obj, false);
> > Currently requires struct_mutex, and is not required as we can ensure
> > the pages are flushed on creation.
> 
> My intent was to maintain what was being done before
> but just doing it earlier.
> 
> But if it's not required..

More so that it's illegal in the global reset context :)

There are patches on the list that remove the struct_mutex for this
operation (eek, no, ignore that you aren't allowed to take that lock
inside reset either!), but with a bit of care we shouldn't need the
set-to-gtt-domain at all as we can do the flush trivially (if it's even
required).

> >> +       if (err) {
> >> +               DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
> >> +                                intel_uc_fw_type_repr(uc_fw->type), err);
> >> +               return err;
> >> +       }
> >> +
> >> +       err = i915_gem_object_pin_pages(obj);
> > I'm pretty sure we don't need to pin the pages here, as the caller
> > should be holding the pages already for the duration of the bind.
> >
> > So I think this should just reduce to the ggtt bind.
> 
> I might be misunderstanding, so could you please clarify
> what you mean by "should be holding"? Are you stating
> that the caller already holds the pages?

To copy the firmware image into the pages, those pages must exist. To
prevent those pages disappearing, we must have kept them around (i.e
pinned). So we know by construction of the uc_fw object we always have
the pages, and could skip bumping the pin-count around the xfer.
Therefore we only need to bind the existing firmware pages into the GGTT
to perform the dma xfer (after satisfying ourselves that the pages are
indeed flushed).
-Chris
Fernando Pacheco April 9, 2019, 11:18 p.m. UTC | #6
On 4/9/19 3:11 PM, Chris Wilson wrote:
> Quoting Fernando Pacheco (2019-04-09 22:31:01)
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> index bf3d12f94365..160959785589 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -4508,6 +4508,8 @@ void i915_gem_resume(struct drm_i915_private *i915)
>>         i915_gem_restore_gtt_mappings(i915);
>>         i915_gem_restore_fences(i915);
>>  
>> +       intel_uc_restore_ggtt_mapping(i915);
> No need, right? The fw ggtt binding is only temporary for the dma xfer.

On resume we re-init the uc hardware and perform the
upload again. Since I moved the binding to the uc init
phase I had to restore the mapping here. And it should
not have been called unconditionally...

Fernando

> -Chris
Fernando Pacheco April 16, 2019, 2:51 p.m. UTC | #7
On 4/9/19 3:22 PM, Chris Wilson wrote:
> Quoting Fernando Pacheco (2019-04-09 22:31:01)
>> diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
>> index 94c04f16a2ad..89e0b942ae86 100644
>> --- a/drivers/gpu/drm/i915/intel_huc.c
>> +++ b/drivers/gpu/drm/i915/intel_huc.c
>> @@ -40,6 +40,59 @@ int intel_huc_init_misc(struct intel_huc *huc)
>>         return 0;
>>  }
>>  
>> +/*
>> + * The HuC firmware image now sits above GUC_GGTT_TOP and this
>> + * portion does not map through GTT. This means GuC cannot
>> + * perform the HuC auth with the rsa signature sitting in that
>> + * range. We resort to additionally perma-pinning the rsa signature
>> + * below GUC_GGTT_TOP and utilizing this mapping to perform
>> + * the authentication.
>> + */
>> +static int intel_huc_rsa_data_create(struct intel_huc *huc)
>> +{
>> +       struct drm_i915_private *i915 = huc_to_i915(huc);
>> +       struct intel_guc *guc = &i915->guc;
>> +       struct i915_vma *vma;
>> +       void *vaddr;
>> +
>> +       vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
>> +       if (IS_ERR(vma))
>> +               return PTR_ERR(vma);
>> +
> Are we not allocating an object for the dma xfer here that is then bound
> to the reserved ggtt node? Why pin it again into the ggtt?
> -Chris

It is not bound to the reserved node. The reserved range is inaccessible by GuC, so I
had to pull the signature back in for the auth stage.

Fernando
Chris Wilson April 16, 2019, 3:04 p.m. UTC | #8
Quoting Fernando Pacheco (2019-04-16 15:51:15)
> 
> On 4/9/19 3:22 PM, Chris Wilson wrote:
> > Quoting Fernando Pacheco (2019-04-09 22:31:01)
> >> diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
> >> index 94c04f16a2ad..89e0b942ae86 100644
> >> --- a/drivers/gpu/drm/i915/intel_huc.c
> >> +++ b/drivers/gpu/drm/i915/intel_huc.c
> >> @@ -40,6 +40,59 @@ int intel_huc_init_misc(struct intel_huc *huc)
> >>         return 0;
> >>  }
> >>  
> >> +/*
> >> + * The HuC firmware image now sits above GUC_GGTT_TOP and this
> >> + * portion does not map through GTT. This means GuC cannot
> >> + * perform the HuC auth with the rsa signature sitting in that
> >> + * range. We resort to additionally perma-pinning the rsa signature
> >> + * below GUC_GGTT_TOP and utilizing this mapping to perform
> >> + * the authentication.
> >> + */
> >> +static int intel_huc_rsa_data_create(struct intel_huc *huc)
> >> +{
> >> +       struct drm_i915_private *i915 = huc_to_i915(huc);
> >> +       struct intel_guc *guc = &i915->guc;
> >> +       struct i915_vma *vma;
> >> +       void *vaddr;
> >> +
> >> +       vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
> >> +       if (IS_ERR(vma))
> >> +               return PTR_ERR(vma);
> >> +
> > Are we not allocating an object for the dma xfer here that is then bound
> > to the reserved ggtt node? Why pin it again into the ggtt?
> > -Chris
> 
> It is not bound to the reserved node. The reserved range is inaccessible by GuC, so I
> had to pull the signature back in for the auth stage.

Oh I see a stray comment above the function, and not about why you
allocate a second pinned vma. Comments are for describing why you do
things in the code; function docs are for telling users how to use the
iface.
-Chris
Fernando Pacheco April 16, 2019, 3:21 p.m. UTC | #9
On 4/16/19 8:04 AM, Chris Wilson wrote:
> Quoting Fernando Pacheco (2019-04-16 15:51:15)
>> On 4/9/19 3:22 PM, Chris Wilson wrote:
>>> Quoting Fernando Pacheco (2019-04-09 22:31:01)
>>>> diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
>>>> index 94c04f16a2ad..89e0b942ae86 100644
>>>> --- a/drivers/gpu/drm/i915/intel_huc.c
>>>> +++ b/drivers/gpu/drm/i915/intel_huc.c
>>>> @@ -40,6 +40,59 @@ int intel_huc_init_misc(struct intel_huc *huc)
>>>>         return 0;
>>>>  }
>>>>  
>>>> +/*
>>>> + * The HuC firmware image now sits above GUC_GGTT_TOP and this
>>>> + * portion does not map through GTT. This means GuC cannot
>>>> + * perform the HuC auth with the rsa signature sitting in that
>>>> + * range. We resort to additionally perma-pinning the rsa signature
>>>> + * below GUC_GGTT_TOP and utilizing this mapping to perform
>>>> + * the authentication.
>>>> + */
>>>> +static int intel_huc_rsa_data_create(struct intel_huc *huc)
>>>> +{
>>>> +       struct drm_i915_private *i915 = huc_to_i915(huc);
>>>> +       struct intel_guc *guc = &i915->guc;
>>>> +       struct i915_vma *vma;
>>>> +       void *vaddr;
>>>> +
>>>> +       vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
>>>> +       if (IS_ERR(vma))
>>>> +               return PTR_ERR(vma);
>>>> +
>>> Are we not allocating an object for the dma xfer here that is then bound
>>> to the reserved ggtt node? Why pin it again into the ggtt?
>>> -Chris
>> It is not bound to the reserved node. The reserved range is inaccessible by GuC, so I
>> had to pull the signature back in for the auth stage.
> Oh I see a stray comment above the function, and not about why you
> allocate a second pinned vma. Comments are for describing why you do

Sorry, I thought I was providing an explanation. I'll revise!

> things in the code; function doc are for telling users how to use the
> iface.

Very true. Thanks for pointing this out.

Fernando

> -Chris
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bf3d12f94365..160959785589 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4508,6 +4508,8 @@  void i915_gem_resume(struct drm_i915_private *i915)
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
 
+	intel_uc_restore_ggtt_mapping(i915);
+
 	/*
 	 * As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 3aabfa2d9198..418cf6701bf0 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -189,9 +189,13 @@  int intel_guc_init(struct intel_guc *guc)
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	int ret;
 
-	ret = guc_shared_data_create(guc);
+	ret = intel_guc_fw_ggtt_pin(guc);
 	if (ret)
 		goto err_fetch;
+
+	ret = guc_shared_data_create(guc);
+	if (ret)
+		goto err_fw_pin;
 	GEM_BUG_ON(!guc->shared_data);
 
 	ret = intel_guc_log_create(&guc->log);
@@ -220,6 +224,8 @@  int intel_guc_init(struct intel_guc *guc)
 	intel_guc_log_destroy(&guc->log);
 err_shared:
 	guc_shared_data_destroy(guc);
+err_fw_pin:
+	intel_guc_fw_ggtt_unpin(guc);
 err_fetch:
 	intel_uc_fw_fini(&guc->fw);
 	return ret;
@@ -237,6 +243,7 @@  void intel_guc_fini(struct intel_guc *guc)
 	intel_guc_ads_destroy(guc);
 	intel_guc_log_destroy(&guc->log);
 	guc_shared_data_destroy(guc);
+	intel_guc_fw_ggtt_unpin(guc);
 	intel_uc_fw_fini(&guc->fw);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 4385d9ef02bb..da73d8747694 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -27,6 +27,8 @@ 
  *    Alex Dai <yu.dai@intel.com>
  */
 
+#include <linux/types.h>
+
 #include "intel_guc_fw.h"
 #include "i915_drv.h"
 
@@ -122,14 +124,16 @@  static void guc_prepare_xfer(struct intel_guc *guc)
 }
 
 /* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_uc_fw *fw = &guc->fw;
+	struct sg_table *pages = fw->obj->mm.pages;
 	u32 rsa[UOS_RSA_SCRATCH_COUNT];
 	int i;
 
-	sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
-			   rsa, sizeof(rsa), guc->fw.rsa_offset);
+	sg_pcopy_to_buffer(pages->sgl, pages->nents,
+			   rsa, sizeof(rsa), fw->rsa_offset);
 
 	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
 		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
@@ -201,7 +205,7 @@  static int guc_wait_ucode(struct intel_guc *guc)
  * transfer between GTT locations. This functionality is left out of the API
  * for now as there is no need for it.
  */
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static int guc_xfer_ucode(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_uc_fw *guc_fw = &guc->fw;
@@ -214,7 +218,7 @@  static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
 	/* Set the source address for the new blob */
-	offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+	offset = intel_guc_fw_ggtt_offset(guc) + guc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
@@ -233,7 +237,7 @@  static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
-static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
+static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
 {
 	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -250,9 +254,9 @@  static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
 	 * by the DMA engine in one operation, whereas the RSA signature is
 	 * loaded via MMIO.
 	 */
-	guc_xfer_rsa(guc, vma);
+	guc_xfer_rsa(guc);
 
-	ret = guc_xfer_ucode(guc, vma);
+	ret = guc_xfer_ucode(guc);
 
 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
@@ -275,3 +279,26 @@  int intel_guc_fw_upload(struct intel_guc *guc)
 {
 	return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
 }
+
+int intel_guc_fw_ggtt_pin(struct intel_guc *guc)
+{
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	u64 slot = intel_guc_fw_ggtt_offset(guc);
+
+	return intel_uc_fw_ggtt_pin(&guc->fw, ggtt, slot);
+}
+
+void intel_guc_fw_ggtt_unpin(struct intel_guc *guc)
+{
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	u64 slot = intel_guc_fw_ggtt_offset(guc);
+
+	intel_uc_fw_ggtt_unpin(&guc->fw, ggtt, slot);
+}
+
+u32 intel_guc_fw_ggtt_offset(struct intel_guc *guc)
+{
+	return intel_uc_fw_ggtt_offset(&guc->fw);
+}
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h
index 4ec5d3d9e2b0..a4610dec59bf 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/intel_guc_fw.h
@@ -29,5 +29,8 @@  struct intel_guc;
 
 void intel_guc_fw_init_early(struct intel_guc *guc);
 int intel_guc_fw_upload(struct intel_guc *guc);
+int intel_guc_fw_ggtt_pin(struct intel_guc *guc);
+void intel_guc_fw_ggtt_unpin(struct intel_guc *guc);
+u32 intel_guc_fw_ggtt_offset(struct intel_guc *guc);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 94c04f16a2ad..89e0b942ae86 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -40,6 +40,59 @@  int intel_huc_init_misc(struct intel_huc *huc)
 	return 0;
 }
 
+/*
+ * The HuC firmware image now sits above GUC_GGTT_TOP and this
+ * portion does not map through GTT. This means GuC cannot
+ * perform the HuC auth with the rsa signature sitting in that
+ * range. We resort to additionally perma-pinning the rsa signature
+ * below GUC_GGTT_TOP and utilizing this mapping to perform
+ * the authentication.
+ */
+static int intel_huc_rsa_data_create(struct intel_huc *huc)
+{
+	struct drm_i915_private *i915 = huc_to_i915(huc);
+	struct intel_guc *guc = &i915->guc;
+	struct i915_vma *vma;
+	void *vaddr;
+
+	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		i915_vma_unpin_and_release(&vma, 0);
+		return PTR_ERR(vaddr);
+	}
+
+	huc->rsa_data = vma;
+	huc->rsa_data_vaddr = vaddr;
+
+	return 0;
+}
+
+static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
+{
+	i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP);
+}
+
+int intel_huc_init(struct intel_huc *huc)
+{
+	int err;
+
+	err = intel_huc_rsa_data_create(huc);
+	if (err)
+		return err;
+
+	return intel_huc_fw_ggtt_pin(huc);
+}
+
+void intel_huc_fini(struct intel_huc *huc)
+{
+	intel_huc_fw_ggtt_unpin(huc);
+	intel_huc_rsa_data_destroy(huc);
+}
+
 /**
  * intel_huc_auth() - Authenticate HuC uCode
  * @huc: intel_huc structure
@@ -55,27 +108,17 @@  int intel_huc_auth(struct intel_huc *huc)
 {
 	struct drm_i915_private *i915 = huc_to_i915(huc);
 	struct intel_guc *guc = &i915->guc;
-	struct i915_vma *vma;
 	u32 status;
 	int ret;
 
 	if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
 		return -ENOEXEC;
 
-	vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
-				       PIN_OFFSET_BIAS | i915->ggtt.pin_bias);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
-		goto fail;
-	}
-
 	ret = intel_guc_auth_huc(guc,
-				 intel_guc_ggtt_offset(guc, vma) +
-				 huc->fw.rsa_offset);
+				 intel_guc_ggtt_offset(guc, huc->rsa_data));
 	if (ret) {
 		DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
-		goto fail_unpin;
+		goto fail;
 	}
 
 	/* Check authentication status, it should be done by now */
@@ -86,14 +129,11 @@  int intel_huc_auth(struct intel_huc *huc)
 					2, 50, &status);
 	if (ret) {
 		DRM_ERROR("HuC: Firmware not verified %#x\n", status);
-		goto fail_unpin;
+		goto fail;
 	}
 
-	i915_vma_unpin(vma);
 	return 0;
 
-fail_unpin:
-	i915_vma_unpin(vma);
 fail:
 	huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
 
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index 7e41d870b509..5b2f4167b529 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -33,10 +33,14 @@  struct intel_huc {
 	struct intel_uc_fw fw;
 
 	/* HuC-specific additions */
+	struct i915_vma *rsa_data;
+	void *rsa_data_vaddr;
 };
 
 void intel_huc_init_early(struct intel_huc *huc);
 int intel_huc_init_misc(struct intel_huc *huc);
+int intel_huc_init(struct intel_huc *huc);
+void intel_huc_fini(struct intel_huc *huc);
 int intel_huc_auth(struct intel_huc *huc);
 int intel_huc_check_status(struct intel_huc *huc);
 
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 80a176d91edc..3775a784f652 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -4,6 +4,8 @@ 
  * Copyright © 2014-2018 Intel Corporation
  */
 
+#include <linux/types.h>
+
 #include "intel_huc_fw.h"
 #include "i915_drv.h"
 
@@ -93,18 +95,20 @@  void intel_huc_fw_init_early(struct intel_huc *huc)
 	huc_fw_select(huc_fw);
 }
 
-/**
- * huc_fw_xfer() - DMA's the firmware
- * @huc_fw: the firmware descriptor
- * @vma: the firmware image (bound into the GGTT)
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
+/* Copy RSA signature from the fw image to within GuC addressable range */
+static void huc_xfer_rsa(struct intel_huc *huc)
 {
-	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+	struct intel_uc_fw *fw = &huc->fw;
+	struct sg_table *pages = fw->obj->mm.pages;
+
+	sg_pcopy_to_buffer(pages->sgl, pages->nents,
+			   huc->rsa_data_vaddr, fw->rsa_size,
+			   fw->rsa_offset);
+}
+
+static int huc_xfer_ucode(struct intel_huc *huc)
+{
+	struct intel_uc_fw *huc_fw = &huc->fw;
 	struct drm_i915_private *dev_priv = huc_to_i915(huc);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long offset = 0;
@@ -116,7 +120,7 @@  static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
 	/* Set the source address for the uCode */
-	offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
+	offset = intel_huc_fw_ggtt_offset(huc) +
 		 huc_fw->header_offset;
 	intel_uncore_write(uncore, DMA_ADDR_0_LOW,
 			   lower_32_bits(offset));
@@ -150,6 +154,23 @@  static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
 	return ret;
 }
 
+/**
+ * huc_fw_xfer() - DMA's the firmware
+ * @huc_fw: the firmware descriptor
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_fw_xfer(struct intel_uc_fw *huc_fw)
+{
+	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+
+	huc_xfer_rsa(huc);
+
+	return huc_xfer_ucode(huc);
+}
+
 /**
  * intel_huc_fw_upload() - load HuC uCode to device
  * @huc: intel_huc structure
@@ -166,3 +187,38 @@  int intel_huc_fw_upload(struct intel_huc *huc)
 {
 	return intel_uc_fw_upload(&huc->fw, huc_fw_xfer);
 }
+
+int intel_huc_fw_ggtt_pin(struct intel_huc *huc)
+{
+	struct drm_i915_private *i915 = huc_to_i915(huc);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	u64 slot = intel_huc_fw_ggtt_offset(huc);
+
+	return intel_uc_fw_ggtt_pin(&huc->fw, ggtt, slot);
+}
+
+void intel_huc_fw_ggtt_unpin(struct intel_huc *huc)
+{
+	struct drm_i915_private *i915 = huc_to_i915(huc);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	u64 slot = intel_huc_fw_ggtt_offset(huc);
+
+	intel_uc_fw_ggtt_unpin(&huc->fw, ggtt, slot);
+}
+
+u32 intel_huc_fw_ggtt_offset(struct intel_huc *huc)
+{
+	struct drm_i915_private *i915 = huc_to_i915(huc);
+	u32 offset = intel_uc_fw_ggtt_offset(&huc->fw);
+
+	/*
+	 * NOTE: GuC and HuC firmware will share the mm allocation
+	 * range, with GuC filling [range_start, range_start + guc_fw_size).
+	 * The HuC firmware is placed at the next page-aligned slot.
+	 */
+	offset += round_up(i915->guc.fw.size, PAGE_SIZE);
+
+	GEM_BUG_ON(upper_32_bits(offset));
+
+	return offset;
+}
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/intel_huc_fw.h
index 8a00a0ebddc5..4d19a61eb3c5 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/intel_huc_fw.h
@@ -11,5 +11,8 @@  struct intel_huc;
 
 void intel_huc_fw_init_early(struct intel_huc *huc);
 int intel_huc_fw_upload(struct intel_huc *huc);
+int intel_huc_fw_ggtt_pin(struct intel_huc *huc);
+void intel_huc_fw_ggtt_unpin(struct intel_huc *huc);
+u32 intel_huc_fw_ggtt_offset(struct intel_huc *huc);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 25b80ffe71ad..fe74647bc496 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -280,6 +280,7 @@  void intel_uc_fini_misc(struct drm_i915_private *i915)
 int intel_uc_init(struct drm_i915_private *i915)
 {
 	struct intel_guc *guc = &i915->guc;
+	struct intel_huc *huc = &i915->huc;
 	int ret;
 
 	if (!USES_GUC(i915))
@@ -292,19 +293,30 @@  int intel_uc_init(struct drm_i915_private *i915)
 	if (ret)
 		return ret;
 
+	if (USES_HUC(i915)) {
+		ret = intel_huc_init(huc);
+		if (ret)
+			goto err_guc;
+	}
+
 	if (USES_GUC_SUBMISSION(i915)) {
 		/*
 		 * This is stuff we need to have available at fw load time
 		 * if we are planning to enable submission later
 		 */
 		ret = intel_guc_submission_init(guc);
-		if (ret) {
-			intel_guc_fini(guc);
-			return ret;
-		}
+		if (ret)
+			goto err_huc;
 	}
 
 	return 0;
+
+err_huc:
+	if (USES_HUC(i915))
+		intel_huc_fini(huc);
+err_guc:
+	intel_guc_fini(guc);
+	return ret;
 }
 
 void intel_uc_fini(struct drm_i915_private *i915)
@@ -319,6 +331,9 @@  void intel_uc_fini(struct drm_i915_private *i915)
 	if (USES_GUC_SUBMISSION(i915))
 		intel_guc_submission_fini(guc);
 
+	if (USES_HUC(i915))
+		intel_huc_fini(&i915->huc);
+
 	intel_guc_fini(guc);
 }
 
@@ -488,6 +503,22 @@  int intel_uc_suspend(struct drm_i915_private *i915)
 	return 0;
 }
 
+/**
+ * intel_uc_restore_ggtt_mapping() - rebind uC firmware images in the GGTT
+ * @i915: i915 device private
+ *
+ * Rewrites the GGTT PTEs for the GuC (and, when used, HuC) firmware
+ * images at their reserved slots. NOTE(review): intended for the resume
+ * path, where GGTT contents programmed before suspend may have been
+ * lost -- confirm against the caller in i915_gem.c.
+ */
+void intel_uc_restore_ggtt_mapping(struct drm_i915_private *i915)
+{
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct intel_guc *guc = &i915->guc;
+	u64 slot = intel_guc_fw_ggtt_offset(guc);
+
+	intel_uc_fw_ggtt_bind(&guc->fw, ggtt, slot);
+
+	if (USES_HUC(i915)) {
+		struct intel_huc *huc = &i915->huc;
+
+		slot = intel_huc_fw_ggtt_offset(huc);
+		intel_uc_fw_ggtt_bind(&huc->fw, ggtt, slot);
+	}
+}
+
 int intel_uc_resume(struct drm_i915_private *i915)
 {
 	struct intel_guc *guc = &i915->guc;
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index c14729786652..e7e2e871700e 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -40,6 +40,7 @@  int intel_uc_init(struct drm_i915_private *dev_priv);
 void intel_uc_fini(struct drm_i915_private *dev_priv);
 void intel_uc_reset_prepare(struct drm_i915_private *i915);
 int intel_uc_suspend(struct drm_i915_private *dev_priv);
+void intel_uc_restore_ggtt_mapping(struct drm_i915_private *i915);
 int intel_uc_resume(struct drm_i915_private *dev_priv);
 
 static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index becf05ebae4d..322d80941abc 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -201,11 +201,8 @@  void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
  * Return: 0 on success, non-zero on failure.
  */
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
-		       int (*xfer)(struct intel_uc_fw *uc_fw,
-				   struct i915_vma *vma))
+		       int (*xfer)(struct intel_uc_fw *uc_fw))
 {
-	struct i915_vma *vma;
-	u32 ggtt_pin_bias;
 	int err;
 
 	DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -219,33 +216,8 @@  int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 			 intel_uc_fw_type_repr(uc_fw->type),
 			 intel_uc_fw_status_repr(uc_fw->load_status));
 
-	/* Pin object with firmware */
-	err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
-	if (err) {
-		DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
-				 intel_uc_fw_type_repr(uc_fw->type), err);
-		goto fail;
-	}
-
-	ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias;
-	vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
-				       PIN_OFFSET_BIAS | ggtt_pin_bias);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
-				 intel_uc_fw_type_repr(uc_fw->type), err);
-		goto fail;
-	}
-
 	/* Call custom loader */
-	err = xfer(uc_fw, vma);
-
-	/*
-	 * We keep the object pages for reuse during resume. But we can unpin it
-	 * now that DMA has completed, so it doesn't continue to take up space.
-	 */
-	i915_vma_unpin(vma);
-
+	err = xfer(uc_fw);
 	if (err)
 		goto fail;
 
@@ -315,3 +287,67 @@  void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
 	drm_printf(p, "\tRSA: offset %u, size %u\n",
 		   uc_fw->rsa_offset, uc_fw->rsa_size);
 }
+
+/*
+ * Write GGTT PTEs mapping the firmware object's pages at @start, without
+ * allocating a drm_mm node or a real vma: a stack-local "dummy" vma
+ * carries only the fields vm.insert_entries() consumes (node placement,
+ * size, pages, obj). The caller must guarantee that
+ * [start, start + obj->base.size) lies inside a reserved range and that
+ * the object's pages are already pinned (enforced by the BUG_ON below).
+ */
+void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
+			   struct i915_ggtt *ggtt, u64 start)
+{
+	struct drm_i915_gem_object *obj = uc_fw->obj;
+	struct i915_vma dummy = {
+		.node = { .start = start, .size = obj->base.size },
+		.size = obj->base.size,
+		.pages = obj->mm.pages,
+		.obj = obj,
+	};
+
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	ggtt->vm.insert_entries(&ggtt->vm, &dummy, obj->cache_level, 0);
+}
+
+/**
+ * intel_uc_fw_ggtt_pin() - pin a uC firmware image and bind it at @start
+ * @uc_fw: uC firmware
+ * @ggtt: the GGTT to bind into
+ * @start: GGTT offset of the reserved slot
+ *
+ * Moves the object to the GTT domain, pins its backing pages and writes
+ * the GGTT PTEs. On success the pages stay pinned until
+ * intel_uc_fw_ggtt_unpin() is called.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int intel_uc_fw_ggtt_pin(struct intel_uc_fw *uc_fw,
+			 struct i915_ggtt *ggtt, u64 start)
+{
+	struct drm_i915_gem_object *obj = uc_fw->obj;
+	int err;
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err) {
+		DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
+		return err;
+	}
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
+		return err;
+	}
+
+	intel_uc_fw_ggtt_bind(uc_fw, ggtt, start);
+
+	return 0;
+}
+
+/**
+ * intel_uc_fw_ggtt_unpin() - undo intel_uc_fw_ggtt_pin()
+ * @uc_fw: uC firmware
+ * @ggtt: the GGTT the image was bound into
+ * @start: GGTT offset the image was bound at
+ *
+ * Clears the PTEs covering the image and releases the page pin.
+ */
+void intel_uc_fw_ggtt_unpin(struct intel_uc_fw *uc_fw,
+			    struct i915_ggtt *ggtt, u64 start)
+{
+	struct drm_i915_gem_object *obj = uc_fw->obj;
+	u64 length = obj->base.size;
+
+	ggtt->vm.clear_range(&ggtt->vm, start, length);
+
+	/*
+	 * NOTE(review): the guard makes this safe to call when the pages
+	 * were never pinned (e.g. an error/teardown path) -- confirm
+	 * callers rely on that; otherwise an unconditional unpin would
+	 * keep pin/unpin strictly balanced and surface imbalances.
+	 */
+	if (i915_gem_object_has_pinned_pages(obj))
+		i915_gem_object_unpin_pages(obj);
+}
+
+/**
+ * intel_uc_fw_ggtt_offset() - start of the reserved uC firmware GGTT range
+ * @uc_fw: uC firmware
+ *
+ * Return: offset, in bytes, of the drm_mm node reserved for uC firmware.
+ * The BUG_ONs guarantee the entire node fits in 32 bits.
+ * NOTE(review): the 32b requirement is inferred from the u32 return type
+ * and the GUC_GGTT_TOP note in the commit message -- confirm against the
+ * firmware DMA programming.
+ */
+u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
+{
+	struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
+	struct drm_mm_node *node = &i915->ggtt.uc_fw;
+
+	GEM_BUG_ON(!node->allocated);
+	GEM_BUG_ON(upper_32_bits(node->start));
+	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+	return lower_32_bits(node->start);
+}
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 854c3a383e07..10ae1bdde3d7 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -28,6 +28,7 @@ 
 struct drm_printer;
 struct drm_i915_private;
 struct i915_vma;
+struct i915_ggtt;
 
 /* Home of GuC, HuC and DMC firmwares */
 #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
@@ -146,9 +147,16 @@  static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
 void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 		       struct intel_uc_fw *uc_fw);
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
-		       int (*xfer)(struct intel_uc_fw *uc_fw,
-				   struct i915_vma *vma));
+		       int (*xfer)(struct intel_uc_fw *uc_fw));
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
+void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
+			   struct i915_ggtt *ggtt, u64 start);
+int intel_uc_fw_ggtt_pin(struct intel_uc_fw *uc_fw,
+			 struct i915_ggtt *ggtt, u64 start);
+void intel_uc_fw_ggtt_unpin(struct intel_uc_fw *uc_fw,
+			    struct i915_ggtt *ggtt, u64 start);
+u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw);
 
 #endif