
[2/2] drm/ttm: stop dangerous caching attribute change

Message ID 20200918150109.29469-2-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [1/2] drm/ttm: remove nonsense AGP handling

Commit Message

Christian König Sept. 18, 2020, 3:01 p.m. UTC
When we swap a BO out or in we try to change the caching
attributes of the pages before/after doing the copy.

On x86 this is done by calling set_pages_uc(),
set_memory_wc() or set_pages_wb() for non-highmem pages
to update the linear mapping of the pages.

On all other platforms we do exactly nothing.

Now on x86 this is unnecessary because copy_highpage() will
either create a temporary mapping of the page, which is
write-back (wb) anyway and is destroyed again right after the
copy, or use the linear mapping with the correct caching
attributes.

So stop this nonsense, just keep the caching as it is, and
return an error when a driver tries to change the caching of
an already populated TT object.

This is much more defensive, since changing caching
attributes is platform- and driver-specific and usually
doesn't work once the pages have been allocated.
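
With the patch applied the expected ordering on the driver side is
roughly the following (a hypothetical sketch; argument lists are
abridged and not meant as exact signatures):

/* Pick the caching state while the TT object is still unpopulated;
 * placement carries the TTM_PL_FLAG_* caching bits.
 */
ttm_tt_set_placement_caching(ttm, placement);

/* Only then allocate the backing pages. */
ret = ttm_tt_populate(...);

/* Any later attempt to switch the caching state of the populated TT
 * hits the WARN_ON_ONCE() in ttm_tt_set_caching() and fails with
 * -EINVAL instead of silently rewriting the kernel mapping.
 */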

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c     |  5 +--
 drivers/gpu/drm/ttm/ttm_tt.c     | 71 ++------------------------------
 include/drm/ttm/ttm_set_memory.h | 22 ----------
 3 files changed, 5 insertions(+), 93 deletions(-)

Comments

Alex Deucher Sept. 22, 2020, 3:15 p.m. UTC | #1
On Fri, Sep 18, 2020 at 11:01 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> When we swap a BO out or in we try to change the caching
> attributes of the pages before/after doing the copy.
>
> On x86 this is done by calling set_pages_uc(),
> set_memory_wc() or set_pages_wb() for non-highmem pages
> to update the linear mapping of the pages.
>
> On all other platforms we do exactly nothing.
>
> Now on x86 this is unnecessary because copy_highpage() will
> either create a temporary mapping of the page, which is
> write-back (wb) anyway and is destroyed again right after the
> copy, or use the linear mapping with the correct caching
> attributes.
>
> So stop this nonsense, just keep the caching as it is, and
> return an error when a driver tries to change the caching of
> an already populated TT object.
>
> This is much more defensive, since changing caching
> attributes is platform- and driver-specific and usually
> doesn't work once the pages have been allocated.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Series is:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>


Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1441bc86ac1c..1fa8d87c13ce 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1555,14 +1555,13 @@  int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
 	 * Move to system cached
 	 */
 
-	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
-	    bo->ttm->caching_state != tt_cached) {
+	if (bo->mem.mem_type != TTM_PL_SYSTEM) {
 		struct ttm_operation_ctx ctx = { false, false };
 		struct ttm_resource evict_mem;
 
 		evict_mem = bo->mem;
 		evict_mem.mm_node = NULL;
-		evict_mem.placement = TTM_PL_FLAG_CACHED;
+		evict_mem.placement = TTM_PL_MASK_CACHING;
 		evict_mem.mem_type = TTM_PL_SYSTEM;
 
 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 014fb3656407..7c278216a188 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,7 +38,6 @@ 
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -115,81 +114,19 @@  static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 	return 0;
 }
 
-static int ttm_tt_set_page_caching(struct page *p,
-				   enum ttm_caching_state c_old,
-				   enum ttm_caching_state c_new)
-{
-	int ret = 0;
-
-	if (PageHighMem(p))
-		return 0;
-
-	if (c_old != tt_cached) {
-		/* p isn't in the default caching state, set it to
-		 * writeback first to free its current memtype. */
-
-		ret = ttm_set_pages_wb(p, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (c_new == tt_wc)
-		ret = ttm_set_pages_wc(p, 1);
-	else if (c_new == tt_uncached)
-		ret = ttm_set_pages_uc(p, 1);
-
-	return ret;
-}
-
-/*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
- */
-
 static int ttm_tt_set_caching(struct ttm_tt *ttm,
 			      enum ttm_caching_state c_state)
 {
-	int i, j;
-	struct page *cur_page;
-	int ret;
-
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (!ttm_tt_is_populated(ttm)) {
-		/* Change caching but don't populate */
-		ttm->caching_state = c_state;
-		return 0;
-	}
-
-	if (ttm->caching_state == tt_cached)
-		drm_clflush_pages(ttm->pages, ttm->num_pages);
-
-	for (i = 0; i < ttm->num_pages; ++i) {
-		cur_page = ttm->pages[i];
-		if (likely(cur_page != NULL)) {
-			ret = ttm_tt_set_page_caching(cur_page,
-						      ttm->caching_state,
-						      c_state);
-			if (unlikely(ret != 0))
-				goto out_err;
-		}
-	}
+	/* Can't change the caching state after TT is populated */
+	if (WARN_ON_ONCE(ttm_tt_is_populated(ttm)))
+		return -EINVAL;
 
 	ttm->caching_state = c_state;
 
 	return 0;
-
-out_err:
-	for (j = 0; j < i; ++j) {
-		cur_page = ttm->pages[j];
-		if (likely(cur_page != NULL)) {
-			(void)ttm_tt_set_page_caching(cur_page, c_state,
-						      ttm->caching_state);
-		}
-	}
-
-	return ret;
 }
 
 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
@@ -353,8 +290,6 @@  int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 	gfp_t gfp_mask;
 	int i, ret;
 
-	BUG_ON(ttm->caching_state != tt_cached);
-
 	swap_storage = shmem_file_setup("ttm swap",
 					ttm->num_pages << PAGE_SHIFT,
 					0);
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
index 3966655b72f1..2343c18a6133 100644
--- a/include/drm/ttm/ttm_set_memory.h
+++ b/include/drm/ttm/ttm_set_memory.h
@@ -57,18 +57,6 @@  static inline int ttm_set_pages_wb(struct page *page, int numpages)
 	return set_pages_wb(page, numpages);
 }
 
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
-	unsigned long addr = (unsigned long)page_address(page);
-
-	return set_memory_wc(addr, numpages);
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
-	return set_pages_uc(page, numpages);
-}
-
 #else /* for CONFIG_X86 */
 
 static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
@@ -91,16 +79,6 @@  static inline int ttm_set_pages_wb(struct page *page, int numpages)
 	return 0;
 }
 
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
-	return 0;
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
-	return 0;
-}
-
 #endif /* for CONFIG_X86 */
 
 #endif