Message ID | 20210506191451.77768-44-matthew.brost@intel.com (mailing list archive)
---|---
State | New, archived
Series | Basic GuC submission support in the i915
On Thu, May 06, 2021 at 12:13:57PM -0700, Matthew Brost wrote:
> Add lrc descriptor context lookup array which can resolve the
> intel_context from the lrc descriptor index. In addition to lookup, it
> can determine in the lrc descriptor context is currently registered with
> the GuC by checking if an entry for a descriptor index is present.
> Future patches in the series will make use of this array.
>
> Cc: John Harrison <john.c.harrison@intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/uc/intel_guc.h        |  5 +++
>  .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 32 +++++++++++++++++--
>  2 files changed, 35 insertions(+), 2 deletions(-)
>
> [snip]
>
> @@ -47,6 +49,9 @@ struct intel_guc {
>  	struct i915_vma *lrc_desc_pool;
>  	void *lrc_desc_pool_vaddr;
>
> +	/* guc_id to intel_context lookup */
> +	struct xarray context_lookup;

The current code sets a disastrous example, but for stuff like this it's
always good to explain the locking, and who's holding references and how
you're handling cycles. Since I guess the intel_context also holds the
guc_id alive somehow.

Again holds for the entire series, where it makes sense (as in we don't
expect to rewrite the entire code anyway).
-Daniel

> [snip rest of the quoted patch]
On Tue, May 11, 2021 at 05:26:34PM +0200, Daniel Vetter wrote:
> On Thu, May 06, 2021 at 12:13:57PM -0700, Matthew Brost wrote:
> > Add lrc descriptor context lookup array which can resolve the
> > intel_context from the lrc descriptor index.
> >
> > [snip]
> >
> > +	/* guc_id to intel_context lookup */
> > +	struct xarray context_lookup;
>
> The current code sets a disastrous example, but for stuff like this it's
> always good to explain the locking, and who's holding references and how
> you're handling cycles. Since I guess the intel_context also holds the
> guc_id alive somehow.

I think (?) I know what you mean by this comment. How about adding:

'If an entry in the context_lookup is present, that means a context
associated with the guc_id is registered with the GuC. We use this xarray
as a lookup mechanism when the GuC communicates with the i915 about the
context.'

> Again holds for the entire series, where it makes sense (as in we don't
> expect to rewrite the entire code anyway).

Slightly out of order, but one of the last patches in the series, 'Update
GuC documentation', adds a big section of comments that attempts to
clarify how all of this code works. I likely should add a section
explaining the data structures as well.

Matt

> [snip rest of the quoted patch]
On Tue, May 11, 2021 at 10:01:28AM -0700, Matthew Brost wrote:
> On Tue, May 11, 2021 at 05:26:34PM +0200, Daniel Vetter wrote:
> > The current code sets a disastrous example, but for stuff like this
> > it's always good to explain the locking, and who's holding references
> > and how you're handling cycles. Since I guess the intel_context also
> > holds the guc_id alive somehow.
>
> I think (?) I know what you mean by this comment. How about adding:
>
> 'If an entry in the context_lookup is present, that means a context
> associated with the guc_id is registered with the GuC. We use this
> xarray as a lookup mechanism when the GuC communicates with the i915
> about the context.'

So no idea how this works, but generally we put a "Protected by
&struct.lock" or similar in here (so you get a nice link plus something
you can use as a jump label in your ide too). Plus since intel_context
has some lifetime rules, explaining whether you're allowed to use the
pointer after you unlock, or whether you need to grab a reference or what
exactly is going on. Usually there's three options:

- No refcounting: you cannot access a pointer obtained through this after
  you unlock.
- Weak reference: you upgrade to a full reference with
  kref_get_unless_zero(). If that fails it indicates a lookup failure,
  since you raced with destruction. If it succeeds you can use the
  pointer after unlock.
- Strong reference: you get your own reference that stays valid with
  kref_get().

I'm just bringing this up because the current i915-gem code is full of
very tricky locking and lifetime rules, and explains roughly nothing of
it in the data structures. Minimally some hints about the
locking/lifetime rules of important structs should be there.

For locking rules it's good to double-down on them by adding
lockdep_assert_held() to all relevant functions (where appropriate only,
of course).

What I generally don't think makes sense is to then also document the
locking in the kerneldoc for the functions. That tends to be one place
too many and, in my experience, just gets out of date and not useful at
all.
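[Editor's note: as a concrete illustration of the "weak reference" option
described above, here is a minimal, hypothetical sketch built only on the
generic xarray and kref APIs. It is not code from this series; the struct
and function names are invented.]

#include <linux/kref.h>
#include <linux/xarray.h>

struct ctx {
	struct kref ref;
};

struct ctx_table {
	/*
	 * id -> ctx lookup. Protected by the xarray's internal lock;
	 * entries hold no reference of their own, so a lookup must
	 * upgrade with kref_get_unless_zero() before dropping the lock.
	 */
	struct xarray lookup;
};

/*
 * Returns a ctx with its own reference held, or NULL if the lookup
 * raced with destruction. The caller must kref_put() when done.
 */
static struct ctx *ctx_lookup_get(struct ctx_table *t, unsigned long id)
{
	struct ctx *c;

	xa_lock(&t->lookup);
	c = xa_load(&t->lookup, id);
	if (c && !kref_get_unless_zero(&c->ref))
		c = NULL;	/* entry is being torn down */
	xa_unlock(&t->lookup);

	return c;
}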
> > Again holds for the entire series, where it makes sense (as in we
> > don't expect to rewrite the entire code anyway).
>
> Slightly out of order, but one of the last patches in the series,
> 'Update GuC documentation', adds a big section of comments that
> attempts to clarify how all of this code works. I likely should add a
> section explaining the data structures as well.

Yeah that would be nice.
-Daniel

> [snip rest of the quoted patch]
On Tue, May 11, 2021 at 07:43:30PM +0200, Daniel Vetter wrote:
> On Tue, May 11, 2021 at 10:01:28AM -0700, Matthew Brost wrote:
> > I think (?) I know what you mean by this comment. How about adding:
> >
> > 'If an entry in the context_lookup is present, that means a context
> > associated with the guc_id is registered with the GuC. We use this
> > xarray as a lookup mechanism when the GuC communicates with the i915
> > about the context.'
>
> So no idea how this works, but generally we put a "Protected by
> &struct.lock" or similar in here. Plus since intel_context has some
> lifetime rules, explaining whether you're allowed to use the pointer
> after you unlock, or whether you need to grab a reference or what
> exactly is going on. Usually there's three options:
>
> [snip the three options]

I think the rules for this are 'if this exists in the xarray, we have a
ref'. Likewise, if the GuC knows about the context we have a ref to the
context.
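[Editor's note: that rule corresponds to Daniel's "strong reference"
option, where the table itself owns a reference for as long as the entry
is registered. A minimal, hypothetical sketch of that pattern follows;
it is not the series code, and struct ctx, table_register() etc. are
invented names.]

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct ctx {
	struct kref ref;
};

static void ctx_release(struct kref *ref)
{
	kfree(container_of(ref, struct ctx, ref));
}

/* Registering stores the pointer and takes the table's own reference. */
static void table_register(struct xarray *xa, unsigned long id,
			   struct ctx *c)
{
	kref_get(&c->ref);
	xa_store_irq(xa, id, c, GFP_ATOMIC);
}

/* Unregistering removes the entry and drops the table's reference. */
static void table_unregister(struct xarray *xa, unsigned long id)
{
	struct ctx *c = xa_erase_irq(xa, id);

	if (c)
		kref_put(&c->ref, ctx_release);
}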
> I'm just bringing this up because the current i915-gem code is full of
> very tricky locking and lifetime rules, and explains roughly nothing of
> it in the data structures. Minimally some hints about the
> locking/lifetime rules of important structs should be there.

Agree. I'll add some comments here and to other structures this code
uses.

> For locking rules it's good to double-down on them by adding
> lockdep_assert_held() to all relevant functions (where appropriate
> only, of course).

Agree. I think I mostly do that in the series. That being said, the
locking is going to be a bit ugly until we switch to the DRM scheduler,
because currently multiple processes can enter the GuC backend in
parallel. With the DRM scheduler we allow a single point of entry, which
simplifies things quite a bit. The current locking rules are explained in
the documentation patch, 'Update GuC documentation'. As the locking
evolves, so will the documentation and the lockdep asserts.

Matt

> [snip rest of the quoted patch]
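[Editor's note: for reference, a tiny self-contained example of the
lockdep_assert_held() pattern discussed above. This is generic code, not
from this series; the struct and function names are invented.]

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct submit_state {
	/* Protects @pending; documented here and enforced below. */
	spinlock_t lock;
	struct list_head pending;
};

/* Must be called with @s->lock held; lockdep checks the rule at runtime. */
static void submit_state_flush_locked(struct submit_state *s)
{
	lockdep_assert_held(&s->lock);

	while (!list_empty(&s->pending))
		list_del(s->pending.next);
}

static void submit_state_flush(struct submit_state *s)
{
	spin_lock(&s->lock);
	submit_state_flush_locked(s);
	spin_unlock(&s->lock);
}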
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index d84f37afb9d8..2eb6c497e43c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -6,6 +6,8 @@
 #ifndef _INTEL_GUC_H_
 #define _INTEL_GUC_H_
 
+#include "linux/xarray.h"
+
 #include "intel_uncore.h"
 #include "intel_guc_fw.h"
 #include "intel_guc_fwif.h"
@@ -47,6 +49,9 @@ struct intel_guc {
 	struct i915_vma *lrc_desc_pool;
 	void *lrc_desc_pool_vaddr;
 
+	/* guc_id to intel_context lookup */
+	struct xarray context_lookup;
+
 	/* Control params for fw initialization */
 	u32 params[GUC_CTL_MAX_DWORDS];
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 6acc1ef34f92..c2b6d27404b7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -65,8 +65,6 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 	return rb_entry(rb, struct i915_priolist, node);
 }
 
-/* Future patches will use this function */
-__attribute__ ((unused))
 static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
 {
 	struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
@@ -76,6 +74,15 @@ static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
 	return &base[index];
 }
 
+static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
+{
+	struct intel_context *ce = xa_load(&guc->context_lookup, id);
+
+	GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
+
+	return ce;
+}
+
 static int guc_lrc_desc_pool_create(struct intel_guc *guc)
 {
 	u32 size;
@@ -96,6 +103,25 @@ static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
 	i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
 }
 
+static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
+{
+	struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
+
+	memset(desc, 0, sizeof(*desc));
+	xa_erase_irq(&guc->context_lookup, id);
+}
+
+static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
+{
+	return __get_context(guc, id);
+}
+
+static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
+					   struct intel_context *ce)
+{
+	xa_store_irq(&guc->context_lookup, id, ce, GFP_ATOMIC);
+}
+
 static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	/* Leaving stub as this function will be used in future patches */
@@ -404,6 +430,8 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	 */
 	GEM_BUG_ON(!guc->lrc_desc_pool);
 
+	xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
+
 	return 0;
 }
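[Editor's note: the diff above initializes the xarray with
XA_FLAGS_LOCK_IRQ and uses the _irq store/erase variants, which take the
xarray's internal lock with interrupts disabled, while xa_load() can be
called locklessly (it uses RCU). A generic, standalone illustration of
those calls follows; it is not part of the patch and the names are
invented.]

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(demo_lookup, XA_FLAGS_LOCK_IRQ);

static int demo_register(unsigned long id, void *ptr)
{
	/* Takes xa_lock_irq() internally; safe against IRQ-context users. */
	return xa_err(xa_store_irq(&demo_lookup, id, ptr, GFP_KERNEL));
}

static void *demo_lookup_entry(unsigned long id)
{
	/* Lockless read-side lookup under RCU. */
	return xa_load(&demo_lookup, id);
}

static void demo_unregister(unsigned long id)
{
	xa_erase_irq(&demo_lookup, id);
}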
Add lrc descriptor context lookup array which can resolve the
intel_context from the lrc descriptor index. In addition to lookup, it
can determine if the context for an lrc descriptor index is currently
registered with the GuC by checking whether an entry for that descriptor
index is present. Future patches in the series will make use of this
array.

Cc: John Harrison <john.c.harrison@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc.h        |  5 +++
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 32 +++++++++++++++++--
 2 files changed, 35 insertions(+), 2 deletions(-)
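[Editor's note: to make the intended use concrete, here is a rough,
hypothetical sketch of how a later patch might consult this lookup when
the GuC reports something about a context. The handler itself is not
part of the patch; only __get_context(), GUC_MAX_LRC_DESCRIPTORS and the
context_lookup xarray come from the series.]

/* Hypothetical GuC-to-host event handler, for illustration only. */
static int handle_context_event(struct intel_guc *guc, u32 guc_id)
{
	struct intel_context *ce;

	if (guc_id >= GUC_MAX_LRC_DESCRIPTORS)
		return -EINVAL;

	ce = __get_context(guc, guc_id);	/* xa_load() on guc->context_lookup */
	if (!ce)
		return -ENOENT;	/* guc_id is not registered with the GuC */

	/* ... act on @ce, e.g. complete a request or update context state ... */

	return 0;
}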