
[1/2] drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping

Message ID 20161219124346.550-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Dec. 19, 2016, 12:43 p.m. UTC
If we at first do not succeed with attempting to remap our physical
pages using a coalesced scatter-gather list, try again with one
scatter-gather entry per page. This should help with swiotlb, as it uses
a limited buffer size and only searches for contiguous chunks within its
buffer, aligned up to the next boundary - i.e. we may prematurely cause
a failure, as we are unable to utilize the unused space between large
chunks, and trigger an error such as:

	 i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)

Reported-by: Juergen Gross <jgross@suse.com>
Fixes: 871dfbd67d4e ("drm/i915: Allow compaction upto SWIOTLB max segment size")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: <drm-intel-fixes@lists.freedesktop.org>
---
 drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

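To make the failure mode concrete: swiotlb hands out bounce space in
2 KiB slots (IO_TLB_SHIFT = 11 in the kernel) and needs a contiguous,
suitably aligned run of slots for each segment, so a fragmented buffer
can reject one large coalesced segment while the same pages still map
as PAGE_SIZE pieces. Below is a minimal userspace model of that
behaviour - first-fit allocation over a toy 1 MiB buffer, alignment
constraints omitted; a sketch, not the kernel's implementation:

#include <stdio.h>
#include <string.h>

#define SLOT_SHIFT 11			/* swiotlb slot size: 2 KiB */
#define SLOT_SIZE  (1u << SLOT_SHIFT)
#define NR_SLOTS   512			/* toy 1 MiB bounce buffer */

static char used[NR_SLOTS];

/* First-fit search for n contiguous free slots; -1 when none exist. */
static int alloc_slots(int n)
{
	for (int start = 0; start + n <= NR_SLOTS; start++) {
		int i;

		for (i = 0; i < n && !used[start + i]; i++)
			;
		if (i == n) {
			memset(used + start, 1, n);
			return start;
		}
	}
	return -1;
}

int main(void)
{
	/* Fragment the buffer: occupy the first half of every 256 KiB. */
	for (int s = 0; s < NR_SLOTS; s += 128)
		memset(used + s, 1, 64);

	/* One coalesced 256 KiB segment needs 128 contiguous slots. */
	printf("256 KiB segment: %s\n",
	       alloc_slots(256 * 1024 / SLOT_SIZE) < 0 ?
	       "swiotlb buffer is full" : "mapped");

	/* The same 256 KiB as 4 KiB pieces needs only 2 slots apiece,
	 * so every piece fits into the fragmented gaps. */
	int mapped = 0;
	for (int p = 0; p < 64; p++)
		mapped += alloc_slots(4096 / SLOT_SIZE) >= 0;
	printf("64 x 4 KiB segments: %d of 64 mapped\n", mapped);

	return 0;
}
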
Comments

Daniel Vetter Dec. 20, 2016, 11:02 a.m. UTC | #1
On Mon, Dec 19, 2016 at 12:43:45PM +0000, Chris Wilson wrote:
> If we at first do not succeed with attempting to remap our physical
> pages using a coalesced scatter-gather list, try again with one
> scatter-gather entry per page. This should help with swiotlb, as it uses
> a limited buffer size and only searches for contiguous chunks within its
> buffer, aligned up to the next boundary - i.e. we may prematurely cause
> a failure, as we are unable to utilize the unused space between large
> chunks, and trigger an error such as:
> 
> 	 i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
> 
> Reported-by: Juergen Gross <jgross@suse.com>
> Fixes: 871dfbd67d4e ("drm/i915: Allow compaction upto SWIOTLB max segment size")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Imre Deak <imre.deak@intel.com>
> Cc: <drm-intel-fixes@lists.freedesktop.org>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Feels a bit funny to call swiotlb_* functions; I'd kinda assume that we
could somehow figure this out from the dma limits instead of leaking
through the DMA API abstraction. But that's already there, so meh.
-Daniel

> ---
>  drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++++++----
>  1 file changed, 22 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 412f3513f269..4e263df2afc3 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2326,7 +2326,8 @@ static struct sg_table *
>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> -	int page_count, i;
> +	const unsigned long page_count = obj->base.size / PAGE_SIZE;
> +	unsigned long i;
>  	struct address_space *mapping;
>  	struct sg_table *st;
>  	struct scatterlist *sg;
> @@ -2352,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  	if (st == NULL)
>  		return ERR_PTR(-ENOMEM);
>  
> -	page_count = obj->base.size / PAGE_SIZE;
> +rebuild_st:
>  	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
>  		kfree(st);
>  		return ERR_PTR(-ENOMEM);
> @@ -2411,8 +2412,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  	i915_sg_trim(st);
>  
>  	ret = i915_gem_gtt_prepare_pages(obj, st);
> -	if (ret)
> -		goto err_pages;
> +	if (ret) {
> +		/* DMA remapping failed? One possible cause is that
> +		 * it could not reserve enough large entries, asking
> +		 * for PAGE_SIZE chunks instead may be helpful.
> +		 */
> +		if (max_segment > PAGE_SIZE) {
> +			for_each_sgt_page(page, sgt_iter, st)
> +				put_page(page);
> +			sg_free_table(st);
> +
> +			max_segment = PAGE_SIZE;
> +			goto rebuild_st;
> +		} else {
> +			dev_warn(&dev_priv->drm.pdev->dev,
> +				 "Failed to DMA remap %lu pages\n",
> +				 page_count);
> +			goto err_pages;
> +		}
> +	}
>  
>  	if (i915_gem_object_needs_bit17_swizzle(obj))
>  		i915_gem_object_do_bit_17_swizzle(obj, st);
> -- 
> 2.11.0
> 
Tvrtko Ursulin Dec. 20, 2016, 11:13 a.m. UTC | #2
On 19/12/2016 12:43, Chris Wilson wrote:
> If we at first do not succeed with attempting to remap our physical
> pages using a coalesced scatter-gather list, try again with one
> scatter-gather entry per page. This should help with swiotlb, as it uses
> a limited buffer size and only searches for contiguous chunks within its
> buffer, aligned up to the next boundary - i.e. we may prematurely cause
> a failure, as we are unable to utilize the unused space between large
> chunks, and trigger an error such as:
>
> 	 i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
>
> Reported-by: Juergen Gross <jgross@suse.com>
> Fixes: 871dfbd67d4e ("drm/i915: Allow compaction upto SWIOTLB max segment size")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Imre Deak <imre.deak@intel.com>
> Cc: <drm-intel-fixes@lists.freedesktop.org>
> ---
>  drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++++++----
>  1 file changed, 22 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 412f3513f269..4e263df2afc3 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2326,7 +2326,8 @@ static struct sg_table *
>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> -	int page_count, i;
> +	const unsigned long page_count = obj->base.size / PAGE_SIZE;
> +	unsigned long i;
>  	struct address_space *mapping;
>  	struct sg_table *st;
>  	struct scatterlist *sg;
> @@ -2352,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  	if (st == NULL)
>  		return ERR_PTR(-ENOMEM);
>
> -	page_count = obj->base.size / PAGE_SIZE;
> +rebuild_st:
>  	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
>  		kfree(st);
>  		return ERR_PTR(-ENOMEM);
> @@ -2411,8 +2412,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  	i915_sg_trim(st);
>
>  	ret = i915_gem_gtt_prepare_pages(obj, st);
> -	if (ret)
> -		goto err_pages;
> +	if (ret) {
> +		/* DMA remapping failed? One possible cause is that
> +		 * it could not reserve enough large entries, asking
> +		 * for PAGE_SIZE chunks instead may be helpful.
> +		 */
> +		if (max_segment > PAGE_SIZE) {
> +			for_each_sgt_page(page, sgt_iter, st)
> +				put_page(page);
> +			sg_free_table(st);
> +
> +			max_segment = PAGE_SIZE;
> +			goto rebuild_st;
> +		} else {
> +			dev_warn(&dev_priv->drm.pdev->dev,
> +				 "Failed to DMA remap %lu pages\n",
> +				 page_count);
> +			goto err_pages;
> +		}
> +	}
>
>  	if (i915_gem_object_needs_bit17_swizzle(obj))
>  		i915_gem_object_do_bit_17_swizzle(obj, st);
>

How much is the cost of freeing and re-acquiring pages in the fallback
case? It could be avoided by reusing the table and adding something like
sgt = i915_sg_copy(sgt, table_max_segment). But it depends on how likely
this path is to be hit on swiotlb platforms. I have no idea. Our
datasets are much bigger than the swiotlb space - is that true on such
platforms?

Regards,

Tvrtko
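
For concreteness, a sketch of what the suggested i915_sg_copy() might
look like - hypothetical code, no such helper exists in the tree at
this point, and names and error handling are illustrative only. It
splits the already-populated entries down to max_segment while keeping
the page references, assuming (as for the tables this function builds)
each entry is an offset-0 run of contiguous pages and max_segment is a
multiple of PAGE_SIZE:

static struct sg_table *i915_sg_copy(struct sg_table *orig,
				     unsigned int max_segment)
{
	struct sg_table *new;
	struct scatterlist *src, *dst;
	unsigned int count = 0, i;

	/* Worst case: every entry splits into max_segment-sized pieces. */
	for_each_sg(orig->sgl, src, orig->nents, i)
		count += DIV_ROUND_UP(src->length, max_segment);

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(new, count, GFP_KERNEL)) {
		kfree(new);
		return ERR_PTR(-ENOMEM);
	}

	dst = new->sgl;
	for_each_sg(orig->sgl, src, orig->nents, i) {
		unsigned int offset = 0;

		while (offset < src->length) {
			unsigned int len =
				min(src->length - offset, max_segment);

			/* Reuse the page references held by 'orig'. */
			sg_set_page(dst,
				    sg_page(src) + (offset >> PAGE_SHIFT),
				    len, 0);
			dst = sg_next(dst);
			offset += len;
		}
	}

	/* Page references now live in 'new'; only the old entries go.
	 * The caller still owns and frees the old struct itself. */
	sg_free_table(orig);
	return new;
}
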
Chris Wilson Dec. 20, 2016, 11:33 a.m. UTC | #3
On Tue, Dec 20, 2016 at 11:13:43AM +0000, Tvrtko Ursulin wrote:
> 
> On 19/12/2016 12:43, Chris Wilson wrote:
> >If we at first do not succeed with attempting to remap our physical
> >pages using a coalesced scatter-gather list, try again with one
> >scatter-gather entry per page. This should help with swiotlb, as it uses
> >a limited buffer size and only searches for contiguous chunks within its
> >buffer, aligned up to the next boundary - i.e. we may prematurely cause
> >a failure, as we are unable to utilize the unused space between large
> >chunks, and trigger an error such as:
> >
> >	 i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
> >
> >Reported-by: Juergen Gross <jgross@suse.com>
> >Fixes: 871dfbd67d4e ("drm/i915: Allow compaction upto SWIOTLB max segment size")
> >Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >Cc: Imre Deak <imre.deak@intel.com>
> >Cc: <drm-intel-fixes@lists.freedesktop.org>
> >---
> > drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++++++----
> > 1 file changed, 22 insertions(+), 4 deletions(-)
> >
> >diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> >index 412f3513f269..4e263df2afc3 100644
> >--- a/drivers/gpu/drm/i915/i915_gem.c
> >+++ b/drivers/gpu/drm/i915/i915_gem.c
> >@@ -2326,7 +2326,8 @@ static struct sg_table *
> > i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
> > {
> > 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> >-	int page_count, i;
> >+	const unsigned long page_count = obj->base.size / PAGE_SIZE;
> >+	unsigned long i;
> > 	struct address_space *mapping;
> > 	struct sg_table *st;
> > 	struct scatterlist *sg;
> >@@ -2352,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
> > 	if (st == NULL)
> > 		return ERR_PTR(-ENOMEM);
> >
> >-	page_count = obj->base.size / PAGE_SIZE;
> >+rebuild_st:
> > 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
> > 		kfree(st);
> > 		return ERR_PTR(-ENOMEM);
> >@@ -2411,8 +2412,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
> > 	i915_sg_trim(st);
> >
> > 	ret = i915_gem_gtt_prepare_pages(obj, st);
> >-	if (ret)
> >-		goto err_pages;
> >+	if (ret) {
> >+		/* DMA remapping failed? One possible cause is that
> >+		 * it could not reserve enough large entries, asking
> >+		 * for PAGE_SIZE chunks instead may be helpful.
> >+		 */
> >+		if (max_segment > PAGE_SIZE) {
> >+			for_each_sgt_page(page, sgt_iter, st)
> >+				put_page(page);
> >+			sg_free_table(st);
> >+
> >+			max_segment = PAGE_SIZE;
> >+			goto rebuild_st;
> >+		} else {
> >+			dev_warn(&dev_priv->drm.pdev->dev,
> >+				 "Failed to DMA remap %lu pages\n",
> >+				 page_count);
> >+			goto err_pages;
> >+		}
> >+	}
> >
> > 	if (i915_gem_object_needs_bit17_swizzle(obj))
> > 		i915_gem_object_do_bit_17_swizzle(obj, st);
> >
> 
> How much is the cost of freeing and re-acquiring pages in the
> fallback case? It could be avoided by reusing the table and adding
> something like sgt = i915_sg_copy(sgt, table_max_segment). But it
> depends on how likely this path is to be hit on swiotlb platforms. I
> have no idea. Our datasets are much bigger than the swiotlb space -
> is that true on such platforms?

It's below my level of care (atm). Platforms hitting this are using
swiotlb *bounce* buffers. They will not be able to support a full gfx
workload while going through a copy. We could avoid the additional
work - the sg_table is large enough for a 1:1 copy if we do it before
the trim - but more importantly we need a simple fix for 4.10.
-Chris
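
For reference, the trim in question is i915_sg_trim(), sketched here
from memory of the tree at that time - treat it as approximate rather
than verbatim. It repacks the nents coalesced entries into a
right-sized table; before it runs, the table still has
orig_nents == page_count entries, which is why an in-place 1:1
expansion would fit:

static void i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	/* Nothing was coalesced, nothing to reclaim. */
	if (orig_st->nents == orig_st->orig_nents)
		return;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
		return;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		new_sg = sg_next(new_sg);
	}

	/* Release only the slack; the pages stay referenced. */
	sg_free_table(orig_st);
	*orig_st = new_st;
}
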
Chris Wilson Dec. 20, 2016, 12:36 p.m. UTC | #4
On Tue, Dec 20, 2016 at 11:33:27AM +0000, Chris Wilson wrote:
> On Tue, Dec 20, 2016 at 11:13:43AM +0000, Tvrtko Ursulin wrote:
> > How much is the cost of freeing and re-acquiring pages in the
> > fallback case? It could be avoided by reusing the table and adding
> > something like sgt = i915_sg_copy(sgt, table_max_segment). But it
> > depends on how likely this path is to be hit on swiotlb platforms. I
> > have no idea. Our datasets are much bigger than the swiotlb space -
> > is that true on such platforms?
> 
> It's below my level of care (atm). Platforms hitting this are using
> swiotlb *bounce* buffers. They will not be able to support a full gfx
> workload while going through a copy. We could avoid the additional
> work - the sg_table is large enough for a 1:1 copy if we do it before
> the trim - but more importantly we need a simple fix for 4.10.

Pushed this pair as I think this is the safe course of action. Creating
i915_sg_expand() is a job for a rainy day.
-Chris
Tvrtko Ursulin Dec. 20, 2016, 1:38 p.m. UTC | #5
On 20/12/2016 12:36, Chris Wilson wrote:
> On Tue, Dec 20, 2016 at 11:33:27AM +0000, Chris Wilson wrote:
>> On Tue, Dec 20, 2016 at 11:13:43AM +0000, Tvrtko Ursulin wrote:
>>> How much is the cost of freeing and re-acquiring pages in the
>>> fallback case? It could be avoided by reusing the table and adding
>>> something like sgt = i915_sg_copy(sgt, table_max_segment). But it
>>> depends on how likely this path is to be hit on swiotlb platforms. I
>>> have no idea. Our datasets are much bigger than the swiotlb space -
>>> is that true on such platforms?
>>
>> It's below my level of care (atm). Platforms hitting this are using
>> swiotlb *bounce* buffers. They will not be able to support a full gfx
>> workload while going through a copy. We could avoid the additional
>> work - the sg_table is large enough for a 1:1 copy if we do it before
>> the trim - but more importantly we need a simple fix for 4.10.
>
> Pushed this pair as I think this is the safe course of action. Creating
> i915_sg_expand() is a job for a rainy day.

It would have been very simple and much more elegant in my opinion. But
I understand the Tested-by tag was precious to keep. I'll send a patch
shortly but it won't be well tested due to time constraints.

Also I don't know why you changed page_count and i to unsigned long when
the sg API can only handle unsigned int for that.

Regards,

Tvrtko
Chris Wilson Dec. 20, 2016, 1:56 p.m. UTC | #6
On Tue, Dec 20, 2016 at 01:38:16PM +0000, Tvrtko Ursulin wrote:
> 
> On 20/12/2016 12:36, Chris Wilson wrote:
> >On Tue, Dec 20, 2016 at 11:33:27AM +0000, Chris Wilson wrote:
> >>On Tue, Dec 20, 2016 at 11:13:43AM +0000, Tvrtko Ursulin wrote:
> >>>How much is the cost of freeing and re-acquiring pages in the
> >>>fallback case? It could be avoided by reusing the table and adding
> >>>something like sgt = i915_sg_copy(sgt, table_max_segment). But it
> >>>depends on how likely this path is to be hit on swiotlb platforms. I
> >>>have no idea. Our datasets are much bigger than the swiotlb space -
> >>>is that true on such platforms?
> >>
> >>It's below my level of care (atm). Platforms hitting this are using
> >>swiotlb *bounce* buffers. They will not be able to support a full gfx
> >>workload while going through a copy. We could avoid the additional
> >>work - the sg_table is large enough for a 1:1 copy if we do it before
> >>the trim - but more importantly we need a simple fix for 4.10.
> >
> >Pushed this pair as I think this is the safe course of action. Creating
> >i915_sg_expand() is a job for a rainy day.
> 
> It would have been very simple and much more elegant in my opinion.

I'm ready to be impressed; in my head, an in-place rewrite was tricky.
:)

> But I understand the Tested-by tag was precious to keep. I'll send a
> patch shortly but it won't be well tested due to time constraints.
> 
> Also I don't know why you changed page_count and i to unsigned long
> when the sg API can only handle unsigned int for that.

Primary concern was moving them out of the way and worrying about our
own 64bit object size issues. Hmm, can we reuse

if (overflows_type(pgcount, unsigned int))
	return -E2BIG;

to catch the mismatch?
-Chris
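
A self-contained illustration of the mismatch being discussed. The
macro mirrors the style of i915's overflows_type(); the exact in-tree
definition may differ. sg_alloc_table() takes an unsigned int nents, so
a 64-bit page count has to be range-checked before it is passed down:

#include <stdio.h>

#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * 8))

int main(void)
{
	/* 2^33 pages of 4 KiB: a hypothetical 32 TiB object */
	unsigned long long page_count = 1ULL << 33;

	if (overflows_type(page_count, unsigned int))
		printf("-E2BIG: %llu pages do not fit in unsigned int\n",
		       page_count);
	return 0;
}
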
Tvrtko Ursulin Dec. 20, 2016, 2:14 p.m. UTC | #7
On 20/12/2016 13:56, Chris Wilson wrote:
> On Tue, Dec 20, 2016 at 01:38:16PM +0000, Tvrtko Ursulin wrote:
>>
>> On 20/12/2016 12:36, Chris Wilson wrote:
>>> On Tue, Dec 20, 2016 at 11:33:27AM +0000, Chris Wilson wrote:
>>>> On Tue, Dec 20, 2016 at 11:13:43AM +0000, Tvrtko Ursulin wrote:
>>>>> How much is the cost of freeing and re-acquiring pages in the
>>>>> fallback case? It could be avoided by reusing the table and adding
>>>>> something like sgt = i915_sg_copy(sgt, table_max_segment). But it
>>>>> depends on how likely this path is to be hit on swiotlb platforms. I
>>>>> have no idea. Our datasets are much bigger than the swiotlb space -
>>>>> is that true on such platforms?
>>>>
>>>> It's below my level of care (atm). Platforms hitting this are using
>>>> swiotlb *bounce* buffers. They will not be able to support a full gfx
>>>> workload while going through a copy. We could avoid the additional
>>>> work - the sg_table is large enough for a 1:1 copy if we do it before
>>>> the trim - but more importantly we need a simple fix for 4.10.
>>>
>>> Pushed this pair as I think this is the safe course of action. Creating
>>> i915_sg_expand() is a job for a rainy day.
>>
>> It would have been very simple and much more elegant in my opinion.
>
> I'm ready to be impressed; in my head, an in-place rewrite was tricky.
> :)

Maybe I've missed something then. We'll see. :)

>> But I understand the Tested-by tag was precious to keep. I'll send a
>> patch shortly but it won't be well tested due to time constraints.
>>
>> Also I don't know why you changed page_count and i to unsigned long
>> when the sg API can only handle unsigned int for that.
>
> Primary concern was moving them out of the way and worrying about our
> own 64bit object size issues. Hmm, can we reuse
>
> if (overflows_type(pgcount, unsigned int))
> 	return -E2BIG;
>
> to catch the mismatch?

You have already added that to i915_gem_object_create some time ago! :)

Regards,

Tvrtko
Chris Wilson Dec. 20, 2016, 2:22 p.m. UTC | #8
On Tue, Dec 20, 2016 at 02:14:21PM +0000, Tvrtko Ursulin wrote:
> 
> On 20/12/2016 13:56, Chris Wilson wrote:
> >On Tue, Dec 20, 2016 at 01:38:16PM +0000, Tvrtko Ursulin wrote:
> >>
> >>On 20/12/2016 12:36, Chris Wilson wrote:
> >>>On Tue, Dec 20, 2016 at 11:33:27AM +0000, Chris Wilson wrote:
> >>>>On Tue, Dec 20, 2016 at 11:13:43AM +0000, Tvrtko Ursulin wrote:
> >>>>>How much is the cost of freeing and re-acquiring pages in the
> >>>>>fallback case? It could be avoided by reusing the table and adding
> >>>>>something like sgt = i915_sg_copy(sgt, table_max_segment). But it
> >>>>>depends on how likely this path is to be hit on swiotlb platforms. I
> >>>>>have no idea. Our datasets are much bigger than the swiotlb space -
> >>>>>is that true on such platforms?
> >>>>
> >>>>It's below my level of care (atm). Platforms hitting this are using
> >>>>swiotlb *bounce* buffers. They will not be able to support a full gfx
> >>>>workload while going through a copy. We could avoid the additional
> >>>>work - the sg_table is large enough for a 1:1 copy if we do it before
> >>>>the trim - but more importantly we need a simple fix for 4.10.
> >>>
> >>>Pushed this pair as I think this is the safe course of action. Creating
> >>>i915_sg_expand() is a job for a rainy day.
> >>
> >>It would have been very simple and much more elegant in my opinion.
> >
> >I'm ready to be impressed; in my head, an in-place rewrite was tricky.
> >:)
> 
> Maybe I've missed something then. We'll see. :)
> 
> >>But I understand the Tested-by tag was precious to keep. I'll send a
> >>patch shortly but it won't be well tested due to time constraints.
> >>
> >>Also I don't know why you changed page_count and i to unsigned long
> >>when the sg API can only handle unsigned int for that.
> >
> >Primary concern was moving them out of the way and worrying about our
> >own 64bit object size issues. Hmm, can we reuse
> >
> >if (overflows_type(pgcount, unsigned int))
> >	return -E2BIG;
> >
> >to catch the mismatch?
> 
> You have already added that to i915_gem_object_create some time ago! :)

I know; I'm thinking ahead to documenting the types around the place so
that we can start actually preparing for huge objects.
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 412f3513f269..4e263df2afc3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2326,7 +2326,8 @@  static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	int page_count, i;
+	const unsigned long page_count = obj->base.size / PAGE_SIZE;
+	unsigned long i;
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
@@ -2352,7 +2353,7 @@  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		kfree(st);
 		return ERR_PTR(-ENOMEM);
@@ -2411,8 +2412,25 @@  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	i915_sg_trim(st);
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
-	if (ret)
-		goto err_pages;
+	if (ret) {
+		/* DMA remapping failed? One possible cause is that
+		 * it could not reserve enough large entries, asking
+		 * for PAGE_SIZE chunks instead may be helpful.
+		 */
+		if (max_segment > PAGE_SIZE) {
+			for_each_sgt_page(page, sgt_iter, st)
+				put_page(page);
+			sg_free_table(st);
+
+			max_segment = PAGE_SIZE;
+			goto rebuild_st;
+		} else {
+			dev_warn(&dev_priv->drm.pdev->dev,
+				 "Failed to DMA remap %lu pages\n",
+				 page_count);
+			goto err_pages;
+		}
+	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);