Message ID | 1499954169-3209-2-git-send-email-deathsimple@vodafone.de (mailing list archive)
---|---
State | New, archived
On Thu, Jul 13, 2017 at 9:56 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Remove unused defines and variables. Also stop computing the
> gfp_flags when they aren't used.
>
> No intended functional change.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 42 ++++++++++++--------------------
>  1 file changed, 16 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> index 6c38046..2081e20 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> @@ -57,22 +57,15 @@
>  #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
>  #define SMALL_ALLOCATION 4
>  #define FREE_ALL_PAGES (~0U)
> -/* times are in msecs */
> -#define IS_UNDEFINED (0)
> -#define IS_WC (1<<1)
> -#define IS_UC (1<<2)
> -#define IS_CACHED (1<<3)
> -#define IS_DMA32 (1<<4)
>
>  enum pool_type {
> -	POOL_IS_UNDEFINED,
> -	POOL_IS_WC = IS_WC,
> -	POOL_IS_UC = IS_UC,
> -	POOL_IS_CACHED = IS_CACHED,
> -	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
> -	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
> -	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
> +	IS_UNDEFINED = 0,
> +	IS_WC = 1 << 1,
> +	IS_UC = 1 << 2,
> +	IS_CACHED = 1 << 3,
> +	IS_DMA32 = 1 << 4
>  };
> +
>  /*
>   * The pool structure. There are usually six pools:
>   *  - generic (not restricted to DMA32):
> @@ -83,11 +76,9 @@ enum pool_type {
>   * The other ones can be shrunk by the shrinker API if neccessary.
>   * @pools: The 'struct device->dma_pools' link.
>   * @type: Type of the pool
> - * @lock: Protects the inuse_list and free_list from concurrnet access. Must be
> + * @lock: Protects the free_list from concurrnet access. Must be
>   * used with irqsave/irqrestore variants because pool allocator maybe called
>   * from delayed work.
> - * @inuse_list: Pool of pages that are in use. The order is very important and
> - * it is in the order that the TTM pages that are put back are in.
>   * @free_list: Pool of pages that are free to be used. No order requirements.
>   * @dev: The device that is associated with these pools.
>   * @size: Size used during DMA allocation.
> @@ -104,7 +95,6 @@ struct dma_pool {
>  	struct list_head pools; /* The 'struct device->dma_pools link */
>  	enum pool_type type;
>  	spinlock_t lock;
> -	struct list_head inuse_list;
>  	struct list_head free_list;
>  	struct device *dev;
>  	unsigned size;
> @@ -606,7 +596,6 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
>  	sec_pool->pool = pool;
>
>  	INIT_LIST_HEAD(&pool->free_list);
> -	INIT_LIST_HEAD(&pool->inuse_list);
>  	INIT_LIST_HEAD(&pool->pools);
>  	spin_lock_init(&pool->lock);
>  	pool->dev = dev;
> @@ -879,22 +868,23 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
>  	struct dma_pool *pool;
>  	enum pool_type type;
>  	unsigned i;
> -	gfp_t gfp_flags;
>  	int ret;
>
>  	if (ttm->state != tt_unpopulated)
>  		return 0;
>
>  	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
> -	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
> -		gfp_flags = GFP_USER | GFP_DMA32;
> -	else
> -		gfp_flags = GFP_HIGHUSER;
> -	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
> -		gfp_flags |= __GFP_ZERO;
> -
>  	pool = ttm_dma_find_pool(dev, type);
>  	if (!pool) {
> +		gfp_t gfp_flags;
> +
> +		if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
> +			gfp_flags = GFP_USER | GFP_DMA32;
> +		else
> +			gfp_flags = GFP_HIGHUSER;
> +		if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
> +			gfp_flags |= __GFP_ZERO;
> +
>  		pool = ttm_dma_pool_init(dev, gfp_flags, type);
>  		if (IS_ERR_OR_NULL(pool)) {
>  			return -ENOMEM;
> --
> 2.7.4
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
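The two halves of the patch read together: pool types become plain bit flags that callers OR together (making the pre-combined POOL_IS_*_DMA32 enumerators unnecessary), and the GFP flags are now computed only on the slow path where a new pool actually has to be created. Below is a minimal user-space C sketch of that pattern; find_pool(), populate() and the flag values are illustrative stand-ins, not TTM's real API:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Pool types as plain bit flags, as in the patch; callers combine
 * them with | instead of using pre-combined POOL_IS_*_DMA32 values. */
enum pool_type {
	IS_UNDEFINED = 0,
	IS_WC = 1 << 1,
	IS_UC = 1 << 2,
	IS_CACHED = 1 << 3,
	IS_DMA32 = 1 << 4
};

struct pool {
	enum pool_type type;
	unsigned gfp_flags;	/* stand-in for the kernel's gfp_t */
};

static struct pool pools[8];
static size_t num_pools;

/* Fast path: look up an existing pool by type (stand-in for
 * ttm_dma_find_pool()). */
static struct pool *find_pool(enum pool_type type)
{
	for (size_t i = 0; i < num_pools; i++)
		if (pools[i].type == type)
			return &pools[i];
	return NULL;
}

static struct pool *populate(bool cached, bool dma32, bool zero)
{
	enum pool_type type = (cached ? IS_CACHED : IS_WC) |
			      (dma32 ? IS_DMA32 : 0);
	struct pool *pool = find_pool(type);

	if (!pool) {
		/* Slow path only: compute the allocation flags here,
		 * mirroring how the patch moves the gfp_flags setup
		 * inside the "if (!pool)" branch. The flag values are
		 * made up for this sketch. */
		unsigned flags = dma32 ? 0x1 : 0x2;

		if (zero)
			flags |= 0x4;

		pool = &pools[num_pools++];
		pool->type = type;
		pool->gfp_flags = flags;
	}
	return pool;
}

int main(void)
{
	struct pool *p = populate(true, true, false);

	printf("type=0x%02x gfp=0x%x\n", p->type, p->gfp_flags);
	return 0;
}
```

On a repeat call with the same (cached, dma32) combination, find_pool() hits and the flag computation is skipped entirely, which is exactly the behavior the commit message describes as "stop computing the gfp_flags when they aren't used."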