
drm/radeon: fix TOPDOWN handling for bo_create

Message ID CADnq5_OqLXrQS8on_RCGm6q4WaLSFE1GnwGdxbOmUHuNajP2Bg@mail.gmail.com (mailing list archive)
State New, archived

Commit Message

Alex Deucher March 11, 2015, 9:14 p.m. UTC
On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
> <deathsimple@vodafone.de> wrote:
>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>
>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>> uses the ttm bo size to determine when to select top down
>>> allocation but since the ttm bo is not initialized yet the
>>> check is always false.
>>>
>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>> Cc: stable@vger.kernel.org
>>
>>
>> And I was already wondering why the heck the BOs always made this ping/pong
>> in memory after creation.
>>
>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>
> And fixing that promptly broke VCE due to vram location requirements.
> Updated patch attached.  Thoughts?

And one more take to make things a bit more explicit for static kernel
driver allocations.

Alex

>
> Alex
>
>>
>> Regards,
>> Christian.
>>
>>
>>> ---
>>>   drivers/gpu/drm/radeon/radeon.h        |  3 ++-
>>>   drivers/gpu/drm/radeon/radeon_gem.c    |  2 +-
>>>   drivers/gpu/drm/radeon/radeon_mn.c     |  2 +-
>>>   drivers/gpu/drm/radeon/radeon_object.c | 17 ++++++++++-------
>>>   drivers/gpu/drm/radeon/radeon_ttm.c    | 12 ++++++++----
>>>   5 files changed, 22 insertions(+), 14 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/radeon/radeon.h
>>> b/drivers/gpu/drm/radeon/radeon.h
>>> index 5587603..726e89f 100644
>>> --- a/drivers/gpu/drm/radeon/radeon.h
>>> +++ b/drivers/gpu/drm/radeon/radeon.h
>>> @@ -2970,7 +2970,8 @@ extern void radeon_surface_init(struct radeon_device
>>> *rdev);
>>>   extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void
>>> *data);
>>>   extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev,
>>> int enable);
>>>   extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int
>>> enable);
>>> -extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32
>>> domain);
>>> +extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32
>>> domain,
>>> +                                            u64 size);
>>>   extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
>>>   extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
>>>                                      uint32_t flags);
>>> diff --git a/drivers/gpu/drm/radeon/radeon_gem.c
>>> b/drivers/gpu/drm/radeon/radeon_gem.c
>>> index ac3c131..d613d0c 100644
>>> --- a/drivers/gpu/drm/radeon/radeon_gem.c
>>> +++ b/drivers/gpu/drm/radeon/radeon_gem.c
>>> @@ -337,7 +337,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev,
>>> void *data,
>>>                         goto release_object;
>>>                 }
>>>   -             radeon_ttm_placement_from_domain(bo,
>>> RADEON_GEM_DOMAIN_GTT);
>>> +               radeon_ttm_placement_from_domain(bo,
>>> RADEON_GEM_DOMAIN_GTT, bo->tbo.mem.size);
>>>                 r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
>>> false);
>>>                 radeon_bo_unreserve(bo);
>>>                 up_read(&current->mm->mmap_sem);
>>> diff --git a/drivers/gpu/drm/radeon/radeon_mn.c
>>> b/drivers/gpu/drm/radeon/radeon_mn.c
>>> index a69bd44..e51f09b 100644
>>> --- a/drivers/gpu/drm/radeon/radeon_mn.c
>>> +++ b/drivers/gpu/drm/radeon/radeon_mn.c
>>> @@ -141,7 +141,7 @@ static void radeon_mn_invalidate_range_start(struct
>>> mmu_notifier *mn,
>>>                                 DRM_ERROR("(%d) failed to wait for user
>>> bo\n", r);
>>>                 }
>>>   -             radeon_ttm_placement_from_domain(bo,
>>> RADEON_GEM_DOMAIN_CPU);
>>> +               radeon_ttm_placement_from_domain(bo,
>>> RADEON_GEM_DOMAIN_CPU, bo->tbo.mem.size);
>>>                 r = ttm_bo_validate(&bo->tbo, &bo->placement, false,
>>> false);
>>>                 if (r)
>>>                         DRM_ERROR("(%d) failed to validate user bo\n", r);
>>> diff --git a/drivers/gpu/drm/radeon/radeon_object.c
>>> b/drivers/gpu/drm/radeon/radeon_object.c
>>> index 43e0994..07f8fd5 100644
>>> --- a/drivers/gpu/drm/radeon/radeon_object.c
>>> +++ b/drivers/gpu/drm/radeon/radeon_object.c
>>> @@ -93,7 +93,8 @@ bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object
>>> *bo)
>>>         return false;
>>>   }
>>>   -void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32
>>> domain)
>>> +void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain,
>>> +                                     u64 size)
>>>   {
>>>         u32 c = 0, i;
>>>   @@ -179,7 +180,7 @@ void radeon_ttm_placement_from_domain(struct
>>> radeon_bo *rbo, u32 domain)
>>>          * improve fragmentation quality.
>>>          * 512kb was measured as the most optimal number.
>>>          */
>>> -       if (rbo->tbo.mem.size > 512 * 1024) {
>>> +       if (size > 512 * 1024) {
>>>                 for (i = 0; i < c; i++) {
>>>                         rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
>>>                 }
>>> @@ -252,7 +253,7 @@ int radeon_bo_create(struct radeon_device *rdev,
>>>         bo->flags &= ~RADEON_GEM_GTT_WC;
>>>   #endif
>>>   -     radeon_ttm_placement_from_domain(bo, domain);
>>> +       radeon_ttm_placement_from_domain(bo, domain, size);
>>>         /* Kernel allocation are uninterruptible */
>>>         down_read(&rdev->pm.mclk_lock);
>>>         r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
>>> @@ -350,7 +351,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32
>>> domain, u64 max_offset,
>>>                 return 0;
>>>         }
>>> -       radeon_ttm_placement_from_domain(bo, domain);
>>> +       radeon_ttm_placement_from_domain(bo, domain, bo->tbo.mem.size);
>>>         for (i = 0; i < bo->placement.num_placement; i++) {
>>>                 /* force to pin into visible video ram */
>>>                 if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
>>> @@ -557,7 +558,7 @@ int radeon_bo_list_validate(struct radeon_device
>>> *rdev,
>>>                         }
>>>                 retry:
>>> -                       radeon_ttm_placement_from_domain(bo, domain);
>>> +                       radeon_ttm_placement_from_domain(bo, domain,
>>> bo->tbo.mem.size);
>>>                         if (ring == R600_RING_TYPE_UVD_INDEX)
>>>                                 radeon_uvd_force_into_uvd_segment(bo,
>>> allowed);
>>>   @@ -800,7 +801,8 @@ int radeon_bo_fault_reserve_notify(struct
>>> ttm_buffer_object *bo)
>>>                 return 0;
>>>         /* hurrah the memory is not visible ! */
>>> -       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
>>> +       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM,
>>> +                                        rbo->tbo.mem.size);
>>>         lpfn =  rdev->mc.visible_vram_size >> PAGE_SHIFT;
>>>         for (i = 0; i < rbo->placement.num_placement; i++) {
>>>                 /* Force into visible VRAM */
>>> @@ -810,7 +812,8 @@ int radeon_bo_fault_reserve_notify(struct
>>> ttm_buffer_object *bo)
>>>         }
>>>         r = ttm_bo_validate(bo, &rbo->placement, false, false);
>>>         if (unlikely(r == -ENOMEM)) {
>>> -               radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_GTT);
>>> +               radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_GTT,
>>> +                                                rbo->tbo.mem.size);
>>>                 return ttm_bo_validate(bo, &rbo->placement, false, false);
>>>         } else if (unlikely(r != 0)) {
>>>                 return r;
>>> diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c
>>> b/drivers/gpu/drm/radeon/radeon_ttm.c
>>> index d02aa1d..ce8ed2d 100644
>>> --- a/drivers/gpu/drm/radeon/radeon_ttm.c
>>> +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
>>> @@ -197,7 +197,8 @@ static void radeon_evict_flags(struct
>>> ttm_buffer_object *bo,
>>>         switch (bo->mem.mem_type) {
>>>         case TTM_PL_VRAM:
>>>                 if
>>> (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
>>> -                       radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_CPU);
>>> +                       radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_CPU,
>>> +
>>> rbo->tbo.mem.size);
>>>                 else if (rbo->rdev->mc.visible_vram_size <
>>> rbo->rdev->mc.real_vram_size &&
>>>                          bo->mem.start < (rbo->rdev->mc.visible_vram_size
>>> >> PAGE_SHIFT)) {
>>>                         unsigned fpfn = rbo->rdev->mc.visible_vram_size >>
>>> PAGE_SHIFT;
>>> @@ -209,7 +210,8 @@ static void radeon_evict_flags(struct
>>> ttm_buffer_object *bo,
>>>                          * BOs to be evicted from VRAM
>>>                          */
>>>                         radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_VRAM |
>>> -
>>> RADEON_GEM_DOMAIN_GTT);
>>> +
>>> RADEON_GEM_DOMAIN_GTT,
>>> +
>>> rbo->tbo.mem.size);
>>>                         rbo->placement.num_busy_placement = 0;
>>>                         for (i = 0; i < rbo->placement.num_placement; i++)
>>> {
>>>                                 if (rbo->placements[i].flags &
>>> TTM_PL_FLAG_VRAM) {
>>> @@ -222,11 +224,13 @@ static void radeon_evict_flags(struct
>>> ttm_buffer_object *bo,
>>>                                 }
>>>                         }
>>>                 } else
>>> -                       radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_GTT);
>>> +                       radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_GTT,
>>> +
>>> rbo->tbo.mem.size);
>>>                 break;
>>>         case TTM_PL_TT:
>>>         default:
>>> -               radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_CPU);
>>> +               radeon_ttm_placement_from_domain(rbo,
>>> RADEON_GEM_DOMAIN_CPU,
>>> +                                                rbo->tbo.mem.size);
>>>         }
>>>         *placement = rbo->placement;
>>>   }
>>
>>

Comments

Michel Dänzer March 12, 2015, 9:02 a.m. UTC | #1
On 12.03.2015 06:14, Alex Deucher wrote:
> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>> <deathsimple@vodafone.de> wrote:
>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>
>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>> uses the ttm bo size to determine when to select top down
>>>> allocation but since the ttm bo is not initialized yet the
>>>> check is always false.
>>>>
>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>> Cc: stable@vger.kernel.org
>>>
>>>
>>> And I was already wondering why the heck the BOs always made this ping/pong
>>> in memory after creation.
>>>
>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>
>> And fixing that promptly broke VCE due to vram location requirements.
>> Updated patch attached.  Thoughts?
> 
> And one more take to make things a bit more explicit for static kernel
> driver allocations.

struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so the
latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
problem is really that some BOs are expected to be within a certain
range from the beginning of VRAM, but lpfn isn't set accordingly. It
would be better to fix that by setting lpfn directly rather than
indirectly via RADEON_GEM_CPU_ACCESS.
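Just to illustrate what I mean (untested sketch; the visible VRAM size is
only an example limit, and rdev stands for the usual radeon_device):

        /* sketch: a placement can be range-limited and top-down at the
         * same time; TTM then allocates from the top of [fpfn, lpfn) */
        struct ttm_place place = {
                .fpfn  = 0,
                .lpfn  = rdev->mc.visible_vram_size >> PAGE_SHIFT,
                .flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                         TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN,
        };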


Anyway, since this isn't the first bug which prevents
TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
wonder if its performance impact should be re-evaluated. Lauri?
Christian König March 12, 2015, 9:23 a.m. UTC | #2
On 12.03.2015 10:02, Michel Dänzer wrote:
> On 12.03.2015 06:14, Alex Deucher wrote:
>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>>> <deathsimple@vodafone.de> wrote:
>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>>> uses the ttm bo size to determine when to select top down
>>>>> allocation but since the ttm bo is not initialized yet the
>>>>> check is always false.
>>>>>
>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>> Cc: stable@vger.kernel.org
>>>>
>>>> And I was already wondering why the heck the BOs always made this ping/pong
>>>> in memory after creation.
>>>>
>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>> And fixing that promptly broke VCE due to vram location requirements.
>>> Updated patch attached.  Thoughts?
>> And one more take to make things a bit more explicit for static kernel
>> driver allocations.
> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
> problem is really that some BOs are expected to be within a certain
> range from the beginning of VRAM, but lpfn isn't set accordingly. It
> would be better to fix that by setting lpfn directly than indirectly via
> RADEON_GEM_CPU_ACCESS.

Yeah, agree. We should probably try to find the root cause of this instead.

As far as I know VCE has no documented limitation on where buffers are 
placed (unlike UVD). So this is a bit strange. Are you sure that it 
isn't UVD which breaks here?

Regards,
Christian.

>
>
> Anyway, since this isn't the first bug which prevents
> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
> wonder if its performance impact should be re-evaluated. Lauri?
>
>
Oded Gabbay March 12, 2015, 9:30 a.m. UTC | #3
On 03/12/2015 11:23 AM, Christian König wrote:
> On 12.03.2015 10:02, Michel Dänzer wrote:
>> On 12.03.2015 06:14, Alex Deucher wrote:
>>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>>>> <deathsimple@vodafone.de> wrote:
>>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>>>> uses the ttm bo size to determine when to select top down
>>>>>> allocation but since the ttm bo is not initialized yet the
>>>>>> check is always false.
>>>>>>
>>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>>> Cc: stable@vger.kernel.org
>>>>>
>>>>> And I was already wondering why the heck the BOs always made this
>>>>> ping/pong
>>>>> in memory after creation.
>>>>>
>>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>>> And fixing that promptly broke VCE due to vram location requirements.
>>>> Updated patch attached.  Thoughts?
>>> And one more take to make things a bit more explicit for static kernel
>>> driver allocations.
>> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
>> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
>> problem is really that some BOs are expected to be within a certain
>> range from the beginning of VRAM, but lpfn isn't set accordingly. It
>> would be better to fix that by setting lpfn directly than indirectly via
>> RADEON_GEM_CPU_ACCESS.
> 
> Yeah, agree. We should probably try to find the root cause of this instead.
> 
> As far as I know VCE has no documented limitation on where buffers are
> placed (unlike UVD). So this is a bit strange. Are you sure that it isn't
> UVD which breaks here?
> 
> Regards,
> Christian.
I noticed this bug when trying to allocate very large BOs (385MB) from the
other side of VRAM.
However, even with this fix, the following scenario still fails:
1. Allocate BO of 385MB on VRAM with no CPU access.
2. Map it to VRAM
3. Allocate second BO of 385MB on VRAM with no CPU access

The last step fails as TTM can't find a place to put this second BO. I
suspect the top-down flag isn't being respected at all by BO
creation/pinning.

I think what happens is that the first BO is pinned right after the
first 256 MB, instead of being pinned at the end of VRAM.
Then, when trying to create the second BO, there is no room for it, as there
is only 256MB before the first BO and 383MB after it.

I need to debug it further, but will probably only do that on Sunday.

	Oded

> 
>>
>>
>> Anyway, since this isn't the first bug which prevents
>> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
>> wonder if its performance impact should be re-evaluated. Lauri?
>>
>>
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Christian König March 12, 2015, 9:36 a.m. UTC | #4
On 12.03.2015 10:30, Oded Gabbay wrote:
>
> On 03/12/2015 11:23 AM, Christian König wrote:
>> On 12.03.2015 10:02, Michel Dänzer wrote:
>>> On 12.03.2015 06:14, Alex Deucher wrote:
>>>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>>>>> <deathsimple@vodafone.de> wrote:
>>>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>>>>> uses the ttm bo size to determine when to select top down
>>>>>>> allocation but since the ttm bo is not initialized yet the
>>>>>>> check is always false.
>>>>>>>
>>>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>>>> Cc: stable@vger.kernel.org
>>>>>> And I was already wondering why the heck the BOs always made this
>>>>>> ping/pong
>>>>>> in memory after creation.
>>>>>>
>>>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>>>> And fixing that promptly broke VCE due to vram location requirements.
>>>>> Updated patch attached.  Thoughts?
>>>> And one more take to make things a bit more explicit for static kernel
>>>> driver allocations.
>>> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
>>> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
>>> problem is really that some BOs are expected to be within a certain
>>> range from the beginning of VRAM, but lpfn isn't set accordingly. It
>>> would be better to fix that by setting lpfn directly than indirectly via
>>> RADEON_GEM_CPU_ACCESS.
>> Yeah, agree. We should probably try to find the root cause of this instead.
>>
>> As far as I know VCE has no documented limitation on where buffers are
>> placed (unlike UVD). So this is a bit strange. Are you sure that it isn't
>> UVD which breaks here?
>>
>> Regards,
>> Christian.
> I noticed this bug when trying to allocate very large BOs (385MB) from the
> other side of VRAM.
> However, even with this fix, the following scenario still fails:
> 1. Allocate BO of 385MB on VRAM with no CPU access.
> 2. Map it to VRAM
> 3. Allocate second BO of 385MB on VRAM with no CPU access
>
> The last step fails as the ttm can't find a place to put this second BO. I
> suspect the Top-Down thing isn't being respected at all by the
> creation/pinning of BO.
>
> I think that what happens is that the first BO is pinned right after the
> first 256 MB, instead of pinning it at the end of the VRAM.
> Then, when trying to create the second BO, there is no room for it, as there
> is only 256MB before the first BO, and 383MB after the first BO.
>
> I need to debug it further, but will probably only do that on Sunday.

What is the content of radeon_vram_mm (in debugfs) after you allocated 
the first BO?

The placement should be visible there pretty clearly.

Regards,
Christian.

>
> 	Oded
>
>>>
>>> Anyway, since this isn't the first bug which prevents
>>> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
>>> wonder if its performance impact should be re-evaluated. Lauri?
>>>
>>>
>> _______________________________________________
>> dri-devel mailing list
>> dri-devel@lists.freedesktop.org
>> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Lauri Kasanen March 12, 2015, 10:21 a.m. UTC | #5
On Thu, 12 Mar 2015 18:02:56 +0900
Michel Dänzer <michel@daenzer.net> wrote:

> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
> problem is really that some BOs are expected to be within a certain
> range from the beginning of VRAM, but lpfn isn't set accordingly. It
> would be better to fix that by setting lpfn directly than indirectly via
> RADEON_GEM_CPU_ACCESS.
> 
> 
> Anyway, since this isn't the first bug which prevents
> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
> wonder if its performance impact should be re-evaluated. Lauri?

I'm sorry, I'm not in a place where I could spend the time to redo the
benchmarks.

If it causes too many issues it is of course easy to disable, but so
far the issues shown have not been caused by it - it merely exposed
wrong settings/bugs elsewhere. From this POV I would say it's good to
have it enabled, to stress the various parts.

This doesn't warm the heart of the guy with flicker after suspend, so
perhaps a kernel module parameter to disable it (defaulting to enabled)?
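Something along these lines would do (untested sketch, names made up):

        /* hypothetical knob, not existing radeon code */
        static int radeon_topdown = 1;
        module_param_named(topdown, radeon_topdown, int, 0444);
        MODULE_PARM_DESC(topdown,
                         "Use top-down VRAM placement for large BOs (1 = enabled (default), 0 = disabled)");

radeon_ttm_placement_from_domain() would then only set TTM_PL_FLAG_TOPDOWN
when radeon_topdown != 0.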

- Lauri
Alex Deucher March 12, 2015, 1:09 p.m. UTC | #6
On Thu, Mar 12, 2015 at 5:23 AM, Christian König
<deathsimple@vodafone.de> wrote:
> On 12.03.2015 10:02, Michel Dänzer wrote:
>>
>> On 12.03.2015 06:14, Alex Deucher wrote:
>>>
>>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com>
>>> wrote:
>>>>
>>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>>>> <deathsimple@vodafone.de> wrote:
>>>>>
>>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>>>
>>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>>>> uses the ttm bo size to determine when to select top down
>>>>>> allocation but since the ttm bo is not initialized yet the
>>>>>> check is always false.
>>>>>>
>>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>>> Cc: stable@vger.kernel.org
>>>>>
>>>>>
>>>>> And I was already wondering why the heck the BOs always made this
>>>>> ping/pong
>>>>> in memory after creation.
>>>>>
>>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>>>
>>>> And fixing that promptly broke VCE due to vram location requirements.
>>>> Updated patch attached.  Thoughts?
>>>
>>> And one more take to make things a bit more explicit for static kernel
>>> driver allocations.
>>
>> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
>> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
>> problem is really that some BOs are expected to be within a certain
>> range from the beginning of VRAM, but lpfn isn't set accordingly. It
>> would be better to fix that by setting lpfn directly than indirectly via
>> RADEON_GEM_CPU_ACCESS.
>
>
> Yeah, agree. We should probably try to find the root cause of this instead.
>
> As far as I know VCE has no documented limitation on where buffers are
> placed (unlike UVD). So this is a bit strange. Are you sure that it isn't
> UVD which breaks here?

It's definitely VCE; I don't know why UVD didn't have a problem.  I
considered using pin_restricted to make sure it got pinned in the CPU
visible region, but that had two problems: 1. it would end up getting
migrated when pinned, 2. it would end up at the top of the restricted
region since the top-down flag is set, which would end up fragmenting
vram.
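For reference, the rejected pin_restricted approach would have looked
roughly like this (sketch only, not what the attached patch does):

        u64 gpu_addr;

        /* pin into the CPU visible part of VRAM; rejected because of the
         * migration and fragmentation problems described above */
        r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
                                     rdev->mc.visible_vram_size, &gpu_addr);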

Alex

>
> Regards,
> Christian.
>
>
>>
>>
>> Anyway, since this isn't the first bug which prevents
>> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
>> wonder if its performance impact should be re-evaluated. Lauri?
>>
>>
>
Michel Dänzer March 13, 2015, 2:55 a.m. UTC | #7
On 12.03.2015 22:09, Alex Deucher wrote:
> On Thu, Mar 12, 2015 at 5:23 AM, Christian König
> <deathsimple@vodafone.de> wrote:
>> On 12.03.2015 10:02, Michel Dänzer wrote:
>>>
>>> On 12.03.2015 06:14, Alex Deucher wrote:
>>>>
>>>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com>
>>>> wrote:
>>>>>
>>>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>>>>> <deathsimple@vodafone.de> wrote:
>>>>>>
>>>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>>>>
>>>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>>>>> uses the ttm bo size to determine when to select top down
>>>>>>> allocation but since the ttm bo is not initialized yet the
>>>>>>> check is always false.
>>>>>>>
>>>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>>>> Cc: stable@vger.kernel.org
>>>>>>
>>>>>>
>>>>>> And I was already wondering why the heck the BOs always made this
>>>>>> ping/pong
>>>>>> in memory after creation.
>>>>>>
>>>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>>>>
>>>>> And fixing that promptly broke VCE due to vram location requirements.
>>>>> Updated patch attached.  Thoughts?
>>>>
>>>> And one more take to make things a bit more explicit for static kernel
>>>> driver allocations.
>>>
>>> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
>>> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
>>> problem is really that some BOs are expected to be within a certain
>>> range from the beginning of VRAM, but lpfn isn't set accordingly. It
>>> would be better to fix that by setting lpfn directly than indirectly via
>>> RADEON_GEM_CPU_ACCESS.
>>
>>
>> Yeah, agree. We should probably try to find the root cause of this instead.
>>
>> As far as I know VCE has no documented limitation on where buffers are
>> placed (unlike UVD). So this is a bit strange. Are you sure that it isn't
>> UVD which breaks here?
> 
> It's definitely VCE, I don't know why UVD didn't have a problem.  I
> considered using pin_restricted to make sure it got pinned in the CPU
> visible region, but that had two problems: 1. it would end up getting
> migrated when pinned,

Maybe something like radeon_uvd_force_into_uvd_segment() is needed for
VCE as well?
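Untested sketch of what such a helper might look like (function name and
the 256MB limit are made up for illustration, modelled on the UVD one):

        /* hypothetical, not existing radeon code */
        static void radeon_vce_force_into_vce_segment(struct radeon_bo *rbo)
        {
                int i;

                for (i = 0; i < rbo->placement.num_placement; ++i) {
                        rbo->placements[i].fpfn = 0;
                        rbo->placements[i].lpfn =
                                (256 * 1024 * 1024) >> PAGE_SHIFT;
                }
        }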


> 2. it would end up at the top of the restricted
> region since the top down flag is set which would end up fragmenting
> vram.

If that's an issue (which outweighs the supposed benefit of
TTM_PL_FLAG_TOPDOWN), then again the proper solution would be not to set
TTM_PL_FLAG_TOPDOWN when rbo->placements[i].lpfn != 0 and smaller than
the whole available region, instead of checking for VRAM and
RADEON_GEM_CPU_ACCESS.
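In code that check would look something like this (sketch against this
patch, not tested; simplified to treat any non-zero lpfn as a restricted
range):

        if (size > 512 * 1024) {
                for (i = 0; i < c; i++) {
                        /* skip top-down for placements that are already
                         * range-restricted via lpfn */
                        if (rbo->placements[i].lpfn)
                                continue;
                        rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
                }
        }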
Daniel Vetter March 13, 2015, 9:11 a.m. UTC | #8
On Thu, Mar 12, 2015 at 06:02:56PM +0900, Michel Dänzer wrote:
> On 12.03.2015 06:14, Alex Deucher wrote:
> > On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
> >> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
> >> <deathsimple@vodafone.de> wrote:
> >>> On 11.03.2015 16:44, Alex Deucher wrote:
> >>>>
> >>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
> >>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
> >>>> uses the ttm bo size to determine when to select top down
> >>>> allocation but since the ttm bo is not initialized yet the
> >>>> check is always false.
> >>>>
> >>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
> >>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
> >>>> Cc: stable@vger.kernel.org
> >>>
> >>>
> >>> And I was already wondering why the heck the BOs always made this ping/pong
> >>> in memory after creation.
> >>>
> >>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
> >>
> >> And fixing that promptly broke VCE due to vram location requirements.
> >> Updated patch attached.  Thoughts?
> > 
> > And one more take to make things a bit more explicit for static kernel
> > driver allocations.
> 
> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
> problem is really that some BOs are expected to be within a certain
> range from the beginning of VRAM, but lpfn isn't set accordingly. It
> would be better to fix that by setting lpfn directly than indirectly via
> RADEON_GEM_CPU_ACCESS.
> 
> 
> Anyway, since this isn't the first bug which prevents
> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
> wonder if its performance impact should be re-evaluated. Lauri?

Topdown allocation in drm_mm is just a hint/bias really, it won't try too
hard to segregate things. If you depend upon perfect topdown allocation
for correctness then this won't work well. The trouble starts once you've
split your free space up - it's not going to look for the topmost hole
first but still picks just the one on top of the stack.
-Daniel
Michel Dänzer March 13, 2015, 9:46 a.m. UTC | #9
On 13.03.2015 18:11, Daniel Vetter wrote:
> On Thu, Mar 12, 2015 at 06:02:56PM +0900, Michel Dänzer wrote:
>> On 12.03.2015 06:14, Alex Deucher wrote:
>>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>>>> <deathsimple@vodafone.de> wrote:
>>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>>>
>>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>>>> uses the ttm bo size to determine when to select top down
>>>>>> allocation but since the ttm bo is not initialized yet the
>>>>>> check is always false.
>>>>>>
>>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>>> Cc: stable@vger.kernel.org
>>>>>
>>>>>
>>>>> And I was already wondering why the heck the BOs always made this ping/pong
>>>>> in memory after creation.
>>>>>
>>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>>>
>>>> And fixing that promptly broke VCE due to vram location requirements.
>>>> Updated patch attached.  Thoughts?
>>>
>>> And one more take to make things a bit more explicit for static kernel
>>> driver allocations.
>>
>> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
>> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
>> problem is really that some BOs are expected to be within a certain
>> range from the beginning of VRAM, but lpfn isn't set accordingly. It
>> would be better to fix that by setting lpfn directly than indirectly via
>> RADEON_GEM_CPU_ACCESS.
>>
>>
>> Anyway, since this isn't the first bug which prevents
>> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
>> wonder if its performance impact should be re-evaluated. Lauri?
> 
> Topdown allocation in drm_mm is just a hint/bias really, it won't try too
> hard to segregate things. If you depend upon perfect topdown allocation
> for correctness then this won't work well. The trouble starts once you've
> split your free space up - it's not going to look for the topmost hole
> first but still picks just the one on top of the stack.

TTM_PL_FLAG_TOPDOWN sets DRM_MM_SEARCH_BELOW as well as
DRM_MM_CREATE_TOP. So it should traverse the list of holes in reverse
order, right?
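For reference, ttm_bo_man_get_node() does roughly this (quoting from
memory, so the exact call may differ):

        enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
        enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;

        if (place->flags & TTM_PL_FLAG_TOPDOWN) {
                sflags = DRM_MM_SEARCH_BELOW;
                aflags = DRM_MM_CREATE_TOP;
        }

        ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
                                                  mem->page_alignment, 0,
                                                  place->fpfn, lpfn,
                                                  sflags, aflags);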
Daniel Vetter March 13, 2015, 4:36 p.m. UTC | #10
On Fri, Mar 13, 2015 at 06:46:33PM +0900, Michel Dänzer wrote:
> On 13.03.2015 18:11, Daniel Vetter wrote:
> > On Thu, Mar 12, 2015 at 06:02:56PM +0900, Michel Dänzer wrote:
> >> On 12.03.2015 06:14, Alex Deucher wrote:
> >>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
> >>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
> >>>> <deathsimple@vodafone.de> wrote:
> >>>>> On 11.03.2015 16:44, Alex Deucher wrote:
> >>>>>>
> >>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
> >>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
> >>>>>> uses the ttm bo size to determine when to select top down
> >>>>>> allocation but since the ttm bo is not initialized yet the
> >>>>>> check is always false.
> >>>>>>
> >>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
> >>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
> >>>>>> Cc: stable@vger.kernel.org
> >>>>>
> >>>>>
> >>>>> And I was already wondering why the heck the BOs always made this ping/pong
> >>>>> in memory after creation.
> >>>>>
> >>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
> >>>>
> >>>> And fixing that promptly broke VCE due to vram location requirements.
> >>>> Updated patch attached.  Thoughts?
> >>>
> >>> And one more take to make things a bit more explicit for static kernel
> >>> driver allocations.
> >>
> >> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
> >> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
> >> problem is really that some BOs are expected to be within a certain
> >> range from the beginning of VRAM, but lpfn isn't set accordingly. It
> >> would be better to fix that by setting lpfn directly than indirectly via
> >> RADEON_GEM_CPU_ACCESS.
> >>
> >>
> >> Anyway, since this isn't the first bug which prevents
> >> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
> >> wonder if its performance impact should be re-evaluated. Lauri?
> > 
> > Topdown allocation in drm_mm is just a hint/bias really, it won't try too
> > hard to segregate things. If you depend upon perfect topdown allocation
> > for correctness then this won't work well. The trouble starts once you've
> > split your free space up - it's not going to look for the topmost hole
> > first but still picks just the one on top of the stack.
> 
> TTM_PL_FLAG_TOPDOWN sets DRM_MM_SEARCH_BELOW as well as
> DRM_MM_CREATE_TOP. So it should traverse the list of holes in reverse
> order, right?

Sure that additional segregation helps a bit more, but in the end if you
split things badly and are a bit unlucky then the buffer can end up pretty
much anywhere. Just wanted to mention that in case someone gets confused
when the buffers end up in unexpected places.
-Daniel
Alex Deucher March 13, 2015, 5:57 p.m. UTC | #11
On Fri, Mar 13, 2015 at 12:36 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Fri, Mar 13, 2015 at 06:46:33PM +0900, Michel Dänzer wrote:
>> On 13.03.2015 18:11, Daniel Vetter wrote:
>> > On Thu, Mar 12, 2015 at 06:02:56PM +0900, Michel Dänzer wrote:
>> >> On 12.03.2015 06:14, Alex Deucher wrote:
>> >>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>> >>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>> >>>> <deathsimple@vodafone.de> wrote:
>> >>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>> >>>>>>
>> >>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>> >>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>> >>>>>> uses the ttm bo size to determine when to select top down
>> >>>>>> allocation but since the ttm bo is not initialized yet the
>> >>>>>> check is always false.
>> >>>>>>
>> >>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>> >>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>> >>>>>> Cc: stable@vger.kernel.org
>> >>>>>
>> >>>>>
>> >>>>> And I was already wondering why the heck the BOs always made this ping/pong
>> >>>>> in memory after creation.
>> >>>>>
>> >>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>> >>>>
>> >>>> And fixing that promptly broke VCE due to vram location requirements.
>> >>>> Updated patch attached.  Thoughts?
>> >>>
>> >>> And one more take to make things a bit more explicit for static kernel
>> >>> driver allocations.
>> >>
>> >> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
>> >> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
>> >> problem is really that some BOs are expected to be within a certain
>> >> range from the beginning of VRAM, but lpfn isn't set accordingly. It
>> >> would be better to fix that by setting lpfn directly than indirectly via
>> >> RADEON_GEM_CPU_ACCESS.
>> >>
>> >>
>> >> Anyway, since this isn't the first bug which prevents
>> >> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
>> >> wonder if its performance impact should be re-evaluated. Lauri?
>> >
>> > Topdown allocation in drm_mm is just a hint/bias really, it won't try too
>> > hard to segregate things. If you depend upon perfect topdown allocation
>> > for correctness then this won't work well. The trouble starts once you've
>> > split your free space up - it's not going to look for the topmost hole
>> > first but still picks just the one on top of the stack.
>>
>> TTM_PL_FLAG_TOPDOWN sets DRM_MM_SEARCH_BELOW as well as
>> DRM_MM_CREATE_TOP. So it should traverse the list of holes in reverse
>> order, right?
>
> Sure that additional segregation helps a bit more, but in the end if you
> split things badly and are a bit unlucky then the buffer can end up pretty
> much anywhere. Just wanted to mention that in case someone gets confused
> when the buffers end up in unexpected places.

There's no explicit requirement that they have to be at the top or
bottom per se, it's just that the buffers in question have a specific
restricted location requirement and they are set up at driver init
time and not moved for the life of the driver so I'd rather not put
them somewhere too sub-optimal.

Alex

> -Daniel
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Oded Gabbay March 15, 2015, 3:07 p.m. UTC | #12
On 03/12/2015 11:36 AM, Christian König wrote:
> On 12.03.2015 10:30, Oded Gabbay wrote:
>>
>> On 03/12/2015 11:23 AM, Christian König wrote:
>>> On 12.03.2015 10:02, Michel Dänzer wrote:
>>>> On 12.03.2015 06:14, Alex Deucher wrote:
>>>>> On Wed, Mar 11, 2015 at 4:51 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>>>>> On Wed, Mar 11, 2015 at 2:21 PM, Christian König
>>>>>> <deathsimple@vodafone.de> wrote:
>>>>>>> On 11.03.2015 16:44, Alex Deucher wrote:
>>>>>>>> radeon_bo_create() calls radeon_ttm_placement_from_domain()
>>>>>>>> before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
>>>>>>>> uses the ttm bo size to determine when to select top down
>>>>>>>> allocation but since the ttm bo is not initialized yet the
>>>>>>>> check is always false.
>>>>>>>>
>>>>>>>> Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
>>>>>>>> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
>>>>>>>> Cc: stable@vger.kernel.org
>>>>>>> And I was already wondering why the heck the BOs always made this
>>>>>>> ping/pong
>>>>>>> in memory after creation.
>>>>>>>
>>>>>>> Patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>>>>> And fixing that promptly broke VCE due to vram location requirements.
>>>>>> Updated patch attached.  Thoughts?
>>>>> And one more take to make things a bit more explicit for static kernel
>>>>> driver allocations.
>>>> struct ttm_place::lpfn is honoured even with TTM_PL_FLAG_TOPDOWN, so
>>>> latter should work with RADEON_GEM_CPU_ACCESS. It sounds like the
>>>> problem is really that some BOs are expected to be within a certain
>>>> range from the beginning of VRAM, but lpfn isn't set accordingly. It
>>>> would be better to fix that by setting lpfn directly than indirectly via
>>>> RADEON_GEM_CPU_ACCESS.
>>> Yeah, agree. We should probably try to find the root cause of this instead.
>>>
>>> As far as I know VCE has no documented limitation on where buffers are
>>> placed (unlike UVD). So this is a bit strange. Are you sure that it isn't
>>> UVD which breaks here?
>>>
>>> Regards,
>>> Christian.
>> I noticed this bug when trying to allocate very large BOs (385MB) from the
>> other side of VRAM.
>> However, even with this fix, the following scenario still fails:
>> 1. Allocate BO of 385MB on VRAM with no CPU access.
>> 2. Map it to VRAM
>> 3. Allocate second BO of 385MB on VRAM with no CPU access
>>
>> The last step fails as the ttm can't find a place to put this second BO. I
>> suspect the Top-Down thing isn't being respected at all by the
>> creation/pinning of BO.
>>
>> I think that what happens is that the first BO is pinned right after the
>> first 256 MB, instead of pinning it at the end of the VRAM.
>> Then, when trying to create the second BO, there is no room for it, as there
>> is only 256MB before the first BO, and 383MB after the first BO.
>>
>> I need to debug it further, but will probably only do that on Sunday.
>
> What is the content of radeon_vram_mm (in debugfs) after you allocated the first
> BO?
>
> The placement should be visible there pretty fine.
>
> Regards,
> Christian.
>
Here are the contents before the allocation:
root@odedg-test:/sys/kernel/debug/dri/0# cat radeon_vram_mm
0x00000000-0x00000040: 0x00000040: used
0x00000040-0x00000041: 0x00000001: used
0x00000041-0x00000042: 0x00000001: used
0x00000042-0x00000043: 0x00000001: used
0x00000043-0x00000044: 0x00000001: used
0x00000044-0x0000eab4: 0x0000ea70: free
0x0000eab4-0x0000edb4: 0x00000300: used
0x0000edb4-0x0000f3b4: 0x00000600: free
0x0000f3b4-0x0000f6b4: 0x00000300: used
0x0000f6b4-0x0000f8b4: 0x00000200: used
0x0000f8b4-0x0000fdc8: 0x00000514: used
0x0000fdc8-0x00010000: 0x00000238: used
0x00010000-0x00040000: 0x00030000: free
total: 262144, used 3984 free 258160

And here they are after the allocation of 385MB BO (not pinned yet):

root@odedg-test:/sys/kernel/debug/dri/0# cat radeon_vram_mm
0x00000000-0x00000040: 0x00000040: used
0x00000040-0x00000041: 0x00000001: used
0x00000041-0x00000042: 0x00000001: used
0x00000042-0x00000043: 0x00000001: used
0x00000043-0x00000044: 0x00000001: used
0x00000044-0x0000eab4: 0x0000ea70: free
0x0000eab4-0x0000edb4: 0x00000300: used
0x0000edb4-0x0000edb8: 0x00000004: free
0x0000edb8-0x0000edb9: 0x00000001: used
0x0000edb9-0x0000edc1: 0x00000008: used
0x0000edc1-0x0000edc9: 0x00000008: used
0x0000edc9-0x0000edd1: 0x00000008: used
0x0000edd1-0x0000edd9: 0x00000008: used
0x0000edd9-0x0000ede1: 0x00000008: used
0x0000ede1-0x0000ede9: 0x00000008: used
0x0000ede9-0x0000edf1: 0x00000008: used
0x0000edf1-0x0000edf9: 0x00000008: used
0x0000edf9-0x0000ee01: 0x00000008: used
0x0000ee01-0x0000ee09: 0x00000008: used
0x0000ee09-0x0000ee11: 0x00000008: used
0x0000ee11-0x0000ee19: 0x00000008: used
0x0000ee19-0x0000ee21: 0x00000008: used
0x0000ee21-0x0000ee29: 0x00000008: used
0x0000ee29-0x0000ee31: 0x00000008: used
0x0000ee31-0x0000ee39: 0x00000008: used
0x0000ee39-0x0000ee41: 0x00000008: used
0x0000ee41-0x0000ee49: 0x00000008: used
0x0000ee49-0x0000ee51: 0x00000008: used
0x0000ee51-0x0000ee59: 0x00000008: used
0x0000ee59-0x0000ee61: 0x00000008: used
0x0000ee61-0x0000ee69: 0x00000008: used
0x0000ee69-0x0000ee71: 0x00000008: used
0x0000ee71-0x0000ee79: 0x00000008: used
0x0000ee79-0x0000ee81: 0x00000008: used
0x0000ee81-0x0000f3b4: 0x00000533: free
0x0000f3b4-0x0000f6b4: 0x00000300: used
0x0000f6b4-0x0000f8b4: 0x00000200: used
0x0000f8b4-0x0000fdc8: 0x00000514: used
0x0000fdc8-0x00010000: 0x00000238: used
0x00010000-0x00027f00: 0x00017f00: free
0x00027f00-0x00040000: 0x00018100: used
total: 262144, used 102745 free 159399

So apparently TTM does take the TTM_PL_FLAG_TOPDOWN flag into consideration.
However, because the rest of the memory is fragmented, I can't allocate more
than 383MB (0x00010000-0x00027f00).
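(Doing the math with 4KB pages: the new block at 0x00027f00-0x00040000 is
0x18100 = 98560 pages = 385MB, so it did land at the very top of the
262144 pages (1GB) of VRAM. The largest remaining hole,
0x00010000-0x00027f00, is 0x17f00 = 98048 pages = 383MB, and the hole at
0x00000044-0x0000eab4 is only 0xea70 = 60016 pages, about 234MB.)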

I assume the contents of 0-0x10000 are taken by the graphics stack and maybe
some of them are pinned? Because there is a large free hole at
0x00000044-0x0000eab4 (0x0000ea70 pages free).

This is an example where dividing the allocation into multiple BOs (of 1-2MB
each) could overcome the fragmentation issue.


	Oded


>>
>>     Oded
>>
>>>>
>>>> Anyway, since this isn't the first bug which prevents
>>>> TTM_PL_FLAG_TOPDOWN from working as intended in the radeon driver, I
>>>> wonder if its performance impact should be re-evaluated. Lauri?
>>>>
>>>>
>>> _______________________________________________
>>> dri-devel mailing list
>>> dri-devel@lists.freedesktop.org
>>> http://lists.freedesktop.org/mailman/listinfo/dri-devel
>

Patch

From 019f96d56915c4ba02ad4bb25acefbea103a084e Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Wed, 11 Mar 2015 11:27:26 -0400
Subject: [PATCH] drm/radeon: fix TOPDOWN handling for bo_create (v3)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

radeon_bo_create() calls radeon_ttm_placement_from_domain()
before ttm_bo_init() is called.  radeon_ttm_placement_from_domain()
uses the ttm bo size to determine when to select top down
allocation but since the ttm bo is not initialized yet the
check is always false.

v2: only use topdown for vram if the user has not requested
CPU access explicitly.  Fixes VCE.

v3: explicitly set CPU access on kernel allocations where we
expect allocations to be at the start of vram to avoid
fragmentation and extra migration.

Noticed-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/radeon/evergreen.c     |  6 +++---
 drivers/gpu/drm/radeon/r600.c          |  3 ++-
 drivers/gpu/drm/radeon/radeon.h        |  3 ++-
 drivers/gpu/drm/radeon/radeon_gart.c   |  2 +-
 drivers/gpu/drm/radeon/radeon_gem.c    |  2 +-
 drivers/gpu/drm/radeon/radeon_mn.c     |  2 +-
 drivers/gpu/drm/radeon/radeon_object.c | 30 +++++++++++++++++++++---------
 drivers/gpu/drm/radeon/radeon_ttm.c    | 14 +++++++++-----
 drivers/gpu/drm/radeon/radeon_uvd.c    |  2 +-
 drivers/gpu/drm/radeon/radeon_vce.c    |  2 +-
 10 files changed, 42 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 973df06..e765632 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4026,7 +4026,7 @@  int sumo_rlc_init(struct radeon_device *rdev)
 		/* save restore block */
 		if (rdev->rlc.save_restore_obj == NULL) {
 			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+					     RADEON_GEM_DOMAIN_VRAM, RADEON_GEM_CPU_ACCESS, NULL,
 					     NULL, &rdev->rlc.save_restore_obj);
 			if (r) {
 				dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
@@ -4105,7 +4105,7 @@  int sumo_rlc_init(struct radeon_device *rdev)
 
 		if (rdev->rlc.clear_state_obj == NULL) {
 			r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+					     RADEON_GEM_DOMAIN_VRAM, RADEON_GEM_CPU_ACCESS, NULL,
 					     NULL, &rdev->rlc.clear_state_obj);
 			if (r) {
 				dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
@@ -4182,7 +4182,7 @@  int sumo_rlc_init(struct radeon_device *rdev)
 		if (rdev->rlc.cp_table_obj == NULL) {
 			r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
 					     PAGE_SIZE, true,
-					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+					     RADEON_GEM_DOMAIN_VRAM, RADEON_GEM_CPU_ACCESS, NULL,
 					     NULL, &rdev->rlc.cp_table_obj);
 			if (r) {
 				dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2fcad34..9e2f2fa 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1431,7 +1431,8 @@  int r600_vram_scratch_init(struct radeon_device *rdev)
 	if (rdev->vram_scratch.robj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     0, NULL, NULL, &rdev->vram_scratch.robj);
+				     RADEON_GEM_CPU_ACCESS,
+				     NULL, NULL, &rdev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5587603..726e89f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2970,7 +2970,8 @@  extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
-extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain,
+					     u64 size);
 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
 extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 				     uint32_t flags);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 5450fa9..fd1c778 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -128,7 +128,7 @@  int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	if (rdev->gart.robj == NULL) {
 		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     0, NULL, NULL, &rdev->gart.robj);
+				     RADEON_GEM_CPU_ACCESS, NULL, NULL, &rdev->gart.robj);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c131..d613d0c 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -337,7 +337,7 @@  int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			goto release_object;
 		}
 
-		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
+		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT, bo->tbo.mem.size);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 		radeon_bo_unreserve(bo);
 		up_read(&current->mm->mmap_sem);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index a69bd44..e51f09b 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -141,7 +141,7 @@  static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 				DRM_ERROR("(%d) failed to wait for user bo\n", r);
 		}
 
-		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU, bo->tbo.mem.size);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 		if (r)
 			DRM_ERROR("(%d) failed to validate user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 43e0994..eee1f9f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -93,7 +93,8 @@  bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
 	return false;
 }
 
-void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain,
+				      u64 size)
 {
 	u32 c = 0, i;
 
@@ -179,9 +180,18 @@  void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	 * improve fragmentation quality.
 	 * 512kb was measured as the most optimal number.
 	 */
-	if (rbo->tbo.mem.size > 512 * 1024) {
-		for (i = 0; i < c; i++) {
-			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
+	if (size > 512 * 1024) {
+		if (domain & RADEON_GEM_DOMAIN_VRAM) {
+			if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) ||
+			    !(rbo->flags & RADEON_GEM_CPU_ACCESS)) {
+				for (i = 0; i < c; i++) {
+					rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
+				}
+			}
+		} else {
+			for (i = 0; i < c; i++) {
+				rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
+			}
 		}
 	}
 }
@@ -252,7 +262,7 @@  int radeon_bo_create(struct radeon_device *rdev,
 	bo->flags &= ~RADEON_GEM_GTT_WC;
 #endif
 
-	radeon_ttm_placement_from_domain(bo, domain);
+	radeon_ttm_placement_from_domain(bo, domain, size);
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
@@ -350,7 +360,7 @@  int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 
 		return 0;
 	}
-	radeon_ttm_placement_from_domain(bo, domain);
+	radeon_ttm_placement_from_domain(bo, domain, bo->tbo.mem.size);
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
 		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
@@ -557,7 +567,7 @@  int radeon_bo_list_validate(struct radeon_device *rdev,
 			}
 
 		retry:
-			radeon_ttm_placement_from_domain(bo, domain);
+			radeon_ttm_placement_from_domain(bo, domain, bo->tbo.mem.size);
 			if (ring == R600_RING_TYPE_UVD_INDEX)
 				radeon_uvd_force_into_uvd_segment(bo, allowed);
 
@@ -800,7 +810,8 @@  int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return 0;
 
 	/* hurrah the memory is not visible ! */
-	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM,
+					 rbo->tbo.mem.size);
 	lpfn =	rdev->mc.visible_vram_size >> PAGE_SHIFT;
 	for (i = 0; i < rbo->placement.num_placement; i++) {
 		/* Force into visible VRAM */
@@ -810,7 +821,8 @@  int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	}
 	r = ttm_bo_validate(bo, &rbo->placement, false, false);
 	if (unlikely(r == -ENOMEM)) {
-		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT,
+						 rbo->tbo.mem.size);
 		return ttm_bo_validate(bo, &rbo->placement, false, false);
 	} else if (unlikely(r != 0)) {
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d02aa1d..8fbf784 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -197,7 +197,8 @@  static void radeon_evict_flags(struct ttm_buffer_object *bo,
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
 		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU,
+							 rbo->tbo.mem.size);
 		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
 			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
 			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
@@ -209,7 +210,8 @@  static void radeon_evict_flags(struct ttm_buffer_object *bo,
 			 * BOs to be evicted from VRAM
 			 */
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
-							 RADEON_GEM_DOMAIN_GTT);
+							 RADEON_GEM_DOMAIN_GTT,
+							 rbo->tbo.mem.size);
 			rbo->placement.num_busy_placement = 0;
 			for (i = 0; i < rbo->placement.num_placement; i++) {
 				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
@@ -222,11 +224,13 @@  static void radeon_evict_flags(struct ttm_buffer_object *bo,
 				}
 			}
 		} else
-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT,
+							 rbo->tbo.mem.size);
 		break;
 	case TTM_PL_TT:
 	default:
-		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU,
+						 rbo->tbo.mem.size);
 	}
 	*placement = rbo->placement;
 }
@@ -888,7 +892,7 @@  int radeon_ttm_init(struct radeon_device *rdev)
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
 	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+			     RADEON_GEM_DOMAIN_VRAM, RADEON_GEM_CPU_ACCESS, NULL,
 			     NULL, &rdev->stollen_vga_memory);
 	if (r) {
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index c10b2ae..52b2682 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -141,7 +141,7 @@  int radeon_uvd_init(struct radeon_device *rdev)
 		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
 		  RADEON_GPU_PAGE_SIZE;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+			     RADEON_GEM_DOMAIN_VRAM, RADEON_GEM_CPU_ACCESS, NULL,
 			     NULL, &rdev->uvd.vcpu_bo);
 	if (r) {
 		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 976fe43..3d75502 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -126,7 +126,7 @@  int radeon_vce_init(struct radeon_device *rdev)
 	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
 	       RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
 	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
+			     RADEON_GEM_DOMAIN_VRAM, RADEON_GEM_CPU_ACCESS, NULL, NULL,
 			     &rdev->vce.vcpu_bo);
 	if (r) {
 		dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
-- 
1.8.3.1