Message ID: 20230920144343.64830-7-dakr@redhat.com
State:      New, archived
Series:     DRM GPUVA Manager GPU-VM features
Hi Danilo,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 1c7a387ffef894b1ab3942f0482dac7a6e0a909c]
url: https://github.com/intel-lab-lkp/linux/commits/Danilo-Krummrich/drm-gpuvm-rename-struct-drm_gpuva_manager-to-struct-drm_gpuvm/20230920-224605
base: 1c7a387ffef894b1ab3942f0482dac7a6e0a909c
patch link: https://lore.kernel.org/r/20230920144343.64830-7-dakr%40redhat.com
patch subject: [PATCH drm-misc-next v4 6/8] drm/gpuvm: add drm_gpuvm_flags to drm_gpuvm
config: alpha-allyesconfig (https://download.01.org/0day-ci/archive/20230921/202309210041.Ypce0gUk-lkp@intel.com/config)
compiler: alpha-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20230921/202309210041.Ypce0gUk-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202309210041.Ypce0gUk-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/gpu/drm/drm_gpuvm.c:712: warning: Function parameter or member 'flags' not described in 'drm_gpuvm_init'
vim +712 drivers/gpu/drm/drm_gpuvm.c
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 689
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 690 /**
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 691 * drm_gpuvm_init() - initialize a &drm_gpuvm
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 692 * @gpuvm: pointer to the &drm_gpuvm to initialize
52ef25512ca721 drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 693 * @drm: the drivers &drm_device
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 694 * @name: the name of the GPU VA space
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 695 * @start_offset: the start offset of the GPU VA space
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 696 * @range: the size of the GPU VA space
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 697 * @reserve_offset: the start of the kernel reserved GPU VA area
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 698 * @reserve_range: the size of the kernel reserved GPU VA area
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 699 * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 700 *
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 701 * The &drm_gpuvm must be initialized with this function before use.
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 702 *
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 703 * Note that @gpuvm must be cleared to 0 before calling this function. The given
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 704 * &name is expected to be managed by the surrounding driver structures.
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 705 */
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 706 void
52ef25512ca721 drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 707 drm_gpuvm_init(struct drm_gpuvm *gpuvm, struct drm_device *drm,
790facc6dac6ef drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 708 const char *name, enum drm_gpuva_flags flags,
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 709 u64 start_offset, u64 range,
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 710 u64 reserve_offset, u64 reserve_range,
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 711 const struct drm_gpuvm_ops *ops)
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 @712 {
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 713 gpuvm->rb.tree = RB_ROOT_CACHED;
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 714 INIT_LIST_HEAD(&gpuvm->rb.list);
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 715
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 716 drm_gpuvm_check_overflow(start_offset, range);
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 717 gpuvm->mm_start = start_offset;
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 718 gpuvm->mm_range = range;
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 719
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 720 gpuvm->name = name ? name : "unknown";
790facc6dac6ef drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 721 gpuvm->flags = flags;
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 722 gpuvm->ops = ops;
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 723
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 724 memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 725
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 726 if (reserve_range) {
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 727 gpuvm->kernel_alloc_node.va.addr = reserve_offset;
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 728 gpuvm->kernel_alloc_node.va.range = reserve_range;
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 729
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 730 if (likely(!drm_gpuvm_check_overflow(reserve_offset,
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 731 reserve_range)))
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 732 __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 733 }
52ef25512ca721 drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 734
52ef25512ca721 drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 735 drm_gem_private_object_init(drm, &gpuvm->d_obj, 0);
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 736 }
06f9274d201d5d drivers/gpu/drm/drm_gpuvm.c Danilo Krummrich 2023-09-20 737 EXPORT_SYMBOL_GPL(drm_gpuvm_init);
e6303f323b1ad9 drivers/gpu/drm/drm_gpuva_mgr.c Danilo Krummrich 2023-07-20 738
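The warning is about the kernel-doc block quoted above: the new flags
parameter was added to drm_gpuvm_init()'s signature but never described in
the comment. As a rough sketch (not the author's actual fix; the wording is
borrowed from the @flags member documentation added to the header later in
this series), the missing line would slot in like so:

	 * @name: the name of the GPU VA space
	 * @flags: the &drm_gpuvm_flags of this GPUVM
	 * @start_offset: the start offset of the GPU VA space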
On Wed, 20 Sep 2023 16:42:39 +0200
Danilo Krummrich <dakr@redhat.com> wrote:

>  void drm_gpuvm_init(struct drm_gpuvm *gpuvm, struct drm_device *drm,
> -		    const char *name,
> +		    const char *name, enum drm_gpuva_flags flags,

s/drm_gpuva_flags/drm_gpuvm_flags/gc
On Wed, 20 Sep 2023 16:42:39 +0200
Danilo Krummrich <dakr@redhat.com> wrote:

> +/**
> + * enum drm_gpuvm_flags - flags for struct drm_gpuvm
> + */
> +enum drm_gpuvm_flags {
> +	/**
> +	 * @DRM_GPUVM_USERBITS: user defined bits
> +	 */
> +	DRM_GPUVM_USERBITS = (1 << 0),

Nit: I tried declaring driver-specific flags, and I find this
counter-intuitive. You basically end up with something like:

enum my_gpuvm_flags {
	MY_FLAG_X = DRM_GPUVM_USERBITS,
	MY_FLAG_Y = DRM_GPUVM_USERBITS << 1,
};

instead of the usual

enum my_gpuvm_flags {
	MY_FLAG_X = BIT(0),
	MY_FLAG_Y = BIT(1),
};

pattern.

Another issue I see coming is if we end up adding more core flags and
drivers start falling short of bits for their own flags. This makes me
wonder if we shouldn't kill this notion of USER flags and let drivers
store their flags in some dedicated field, given they're likely to
derive drm_gpuvm and drm_gpuva with their own object anyway.

> +};
> +
On 9/22/23 13:58, Boris Brezillon wrote:
> On Wed, 20 Sep 2023 16:42:39 +0200
> Danilo Krummrich <dakr@redhat.com> wrote:
>
>> +/**
>> + * enum drm_gpuvm_flags - flags for struct drm_gpuvm
>> + */
>> +enum drm_gpuvm_flags {
>> +	/**
>> +	 * @DRM_GPUVM_USERBITS: user defined bits
>> +	 */
>> +	DRM_GPUVM_USERBITS = (1 << 0),
>
> Nit: I tried declaring driver-specific flags, and I find this
> counter-intuitive. You basically end up with something like:
>
> enum my_gpuvm_flags {
> 	MY_FLAG_X = DRM_GPUVM_USERBITS,
> 	MY_FLAG_Y = DRM_GPUVM_USERBITS << 1,
> };
>
> instead of the usual
>
> enum my_gpuvm_flags {
> 	MY_FLAG_X = BIT(0),
> 	MY_FLAG_Y = BIT(1),
> };
>
> pattern.

Right, same as with dma_fence flags.

>
> Another issue I see coming is if we end up adding more core flags and
> drivers start falling short of bits for their own flags. This makes me
> wonder if we shouldn't kill this notion of USER flags and let drivers
> store their flags in some dedicated field, given they're likely to
> derive drm_gpuvm and drm_gpuva with their own object anyway.

The only reason I have this in the code is that Xe asked for this with
drm_gpuva_flags. Hence, for consistency reasons I added it for
drm_gpuvm_flags too.

Drivers can still have their own flag fields if needed, otherwise I guess
it doesn't really hurt to keep DRM_GPUVM_USERBITS in case someone wants
to use it.

>
>> +};
>> +
>
On Wed, 27 Sep 2023 18:52:55 +0200
Danilo Krummrich <dakr@redhat.com> wrote:

> On 9/22/23 13:58, Boris Brezillon wrote:
> > On Wed, 20 Sep 2023 16:42:39 +0200
> > Danilo Krummrich <dakr@redhat.com> wrote:
> >
> >> +/**
> >> + * enum drm_gpuvm_flags - flags for struct drm_gpuvm
> >> + */
> >> +enum drm_gpuvm_flags {
> >> +	/**
> >> +	 * @DRM_GPUVM_USERBITS: user defined bits
> >> +	 */
> >> +	DRM_GPUVM_USERBITS = (1 << 0),
> >
> > Nit: I tried declaring driver-specific flags, and I find this
> > counter-intuitive. You basically end up with something like:
> >
> > enum my_gpuvm_flags {
> > 	MY_FLAG_X = DRM_GPUVM_USERBITS,
> > 	MY_FLAG_Y = DRM_GPUVM_USERBITS << 1,
> > };
> >
> > instead of the usual
> >
> > enum my_gpuvm_flags {
> > 	MY_FLAG_X = BIT(0),
> > 	MY_FLAG_Y = BIT(1),
> > };
> >
> > pattern.
>
> Right, same as with dma_fence flags.
>
> >
> > Another issue I see coming is if we end up adding more core flags and
> > drivers start falling short of bits for their own flags. This makes me
> > wonder if we shouldn't kill this notion of USER flags and let drivers
> > store their flags in some dedicated field, given they're likely to
> > derive drm_gpuvm and drm_gpuva with their own object anyway.
>
> The only reason I have this in the code is that Xe asked for this with
> drm_gpuva_flags. Hence, for consistency reasons I added it for
> drm_gpuvm_flags too.

Yeah, my comment stands for both drm_gpuva_flags and drm_gpuvm_flags
actually.

>
> Drivers can still have their own flag fields if needed, otherwise I guess
> it doesn't really hurt to keep DRM_GPUVM_USERBITS in case someone wants
> to use it.

Sure, it doesn't hurt, but given drivers are inheriting from this object
anyway, I thought it'd be simpler/more future proof to let them have their
flags in a separate field. It's not like we care about saving 4 bytes in
such a big object. Might be a bit different for drm_gpuva given the amount
of live mappings one VM might have, but even there, I suspect the current
drm_gpuva size is going to hurt if we have millions of 4k mappings, so,
four more bytes won't make a huge difference...

Anyway, I don't think that's a blocker, I just thought I'd mention it,
that's all.
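For illustration, the dedicated-field alternative Boris describes would
look roughly like this; my_gpuvm and my_vm_flags are hypothetical names,
not part of this series:

/* Hypothetical driver object deriving from drm_gpuvm. Driver flags live
 * in their own field, so the full bit range is available and no
 * offsetting against the core-reserved USERBITS is needed. */
enum my_vm_flags {
	MY_VM_FLAG_X = BIT(0),
	MY_VM_FLAG_Y = BIT(1),
};

struct my_gpuvm {
	struct drm_gpuvm base;	/* core GPU VA manager state */
	enum my_vm_flags flags;	/* driver-private flags */
};

Since drivers typically embed drm_gpuvm in a container like this anyway
(and upcast with container_of()), the extra field costs a few bytes per
VM at most.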
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 6ee224e1121e..6e9d2d478bb8 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -705,7 +705,7 @@ drm_gpuva_range_valid(struct drm_gpuvm *gpuvm,
  */
 void
 drm_gpuvm_init(struct drm_gpuvm *gpuvm, struct drm_device *drm,
-	       const char *name,
+	       const char *name, enum drm_gpuva_flags flags,
 	       u64 start_offset, u64 range,
 	       u64 reserve_offset, u64 reserve_range,
 	       const struct drm_gpuvm_ops *ops)
@@ -718,6 +718,7 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, struct drm_device *drm,
 	gpuvm->mm_range = range;
 
 	gpuvm->name = name ? name : "unknown";
+	gpuvm->flags = flags;
 	gpuvm->ops = ops;
 
 	memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index cf709afd2ac7..3de8533841db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1864,7 +1864,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
 	uvmm->kernel_managed_addr = kernel_managed_addr;
 	uvmm->kernel_managed_size = kernel_managed_size;
 
-	drm_gpuvm_init(&uvmm->base, cli->drm->dev, cli->name,
+	drm_gpuvm_init(&uvmm->base, cli->drm->dev, cli->name, 0,
 		       NOUVEAU_VA_SPACE_START,
 		       NOUVEAU_VA_SPACE_END,
 		       kernel_managed_addr, kernel_managed_size,
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 2c9ad6eb9401..f57ad1f0f0d0 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -192,6 +192,16 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
 	return va->flags & DRM_GPUVA_INVALIDATED;
 }
 
+/**
+ * enum drm_gpuvm_flags - flags for struct drm_gpuvm
+ */
+enum drm_gpuvm_flags {
+	/**
+	 * @DRM_GPUVM_USERBITS: user defined bits
+	 */
+	DRM_GPUVM_USERBITS = (1 << 0),
+};
+
 /**
  * struct drm_gpuvm - DRM GPU VA Manager
  *
@@ -210,6 +220,11 @@ struct drm_gpuvm {
 	 */
 	const char *name;
 
+	/**
+	 * @flags: the &drm_gpuvm_flags of this GPUVM
+	 */
+	enum drm_gpuva_flags flags;
+
 	/**
 	 * @mm_start: start of the VA space
 	 */
@@ -256,7 +271,7 @@ struct drm_gpuvm {
 };
 
 void drm_gpuvm_init(struct drm_gpuvm *gpuvm, struct drm_device *drm,
-		    const char *name,
+		    const char *name, enum drm_gpuva_flags flags,
 		    u64 start_offset, u64 range,
 		    u64 reserve_offset, u64 reserve_range,
 		    const struct drm_gpuvm_ops *ops);
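For illustration again, a driver opting into the USERBITS scheme this
patch keeps would stack its flags on top of DRM_GPUVM_USERBITS and hand
them to drm_gpuvm_init(); the flag names and call-site values below are
hypothetical:

/* Hypothetical driver flags built on the core-reserved user bits. */
enum my_vm_flags {
	MY_VM_FLAG_X = DRM_GPUVM_USERBITS,
	MY_VM_FLAG_Y = DRM_GPUVM_USERBITS << 1,
};

	/* Drivers without custom flags pass 0, as the nouveau hunk above
	 * does; others pass their own bits through the flags argument. */
	drm_gpuvm_init(&vm->base, drm, "my-vm", MY_VM_FLAG_X,
		       va_start, va_range,
		       resv_offset, resv_range, &my_gpuvm_ops);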
Introduce flags for struct drm_gpuvm; this is required by subsequent
commits.

Signed-off-by: Danilo Krummrich <dakr@redhat.com>
---
 drivers/gpu/drm/drm_gpuvm.c            |  3 ++-
 drivers/gpu/drm/nouveau/nouveau_uvmm.c |  2 +-
 include/drm/drm_gpuvm.h                | 17 ++++++++++++++++-
 3 files changed, 19 insertions(+), 3 deletions(-)