@@ -44,6 +44,7 @@
struct dma_pool { /* the pool */
struct xarray pages;
spinlock_t lock;
+ void *next_block;
size_t size;
struct device *dev;
size_t allocation;
@@ -56,7 +57,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
void *vaddr;
dma_addr_t dma;
unsigned int in_use;
- unsigned int offset;
};
static DEFINE_MUTEX(pools_lock);
@@ -140,8 +140,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
if (size == 0)
return NULL;
- else if (size < 4)
- size = 4;
+ else if (size < sizeof(void *))
+ size = sizeof(void *);
size = ALIGN(size, align);
allocation = max_t(size_t, size, PAGE_SIZE);
@@ -164,6 +164,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
retval->size = size;
retval->boundary = boundary;
retval->allocation = allocation;
+ retval->next_block = NULL;
INIT_LIST_HEAD(&retval->pools);
@@ -201,18 +202,25 @@ EXPORT_SYMBOL(dma_pool_create);
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
- unsigned int offset = 0;
unsigned int next_boundary = pool->boundary;
+ unsigned int offset = pool->size;
+ void **v = page->vaddr;
+ void *next;
- do {
- unsigned int next = offset + pool->size;
- if (unlikely((next + pool->size) >= next_boundary)) {
- next = next_boundary;
+ while (offset < pool->allocation) {
+ if (offset + pool->size >= next_boundary) {
+ offset = next_boundary;
next_boundary += pool->boundary;
+ continue;
}
- *(int *)(page->vaddr + offset) = next;
- offset = next;
- } while (offset < pool->allocation);
+
+ next = page->vaddr + offset;
+ *v = next;
+ v = next;
+
+ offset += pool->size;
+ }
+ *v = NULL;
}
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
@@ -230,7 +238,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
#endif
pool_initialise_page(pool, page);
page->in_use = 0;
- page->offset = 0;
} else {
kfree(page);
page = NULL;
@@ -301,6 +308,17 @@ void dma_pool_destroy(struct dma_pool *pool)
}
EXPORT_SYMBOL(dma_pool_destroy);
+static struct dma_page *pool_find_page(struct dma_pool *pool, unsigned long vaddr)
+{
+	/*
+	 * NOTE(review): the mask assumes pool->allocation is a power of
+	 * two.  dma_pool_create() sets allocation = max(size, PAGE_SIZE)
+	 * without rounding up, so a non-power-of-two size > PAGE_SIZE
+	 * would make this lookup miss (and the dma_pool_alloc() fast
+	 * path dereferences the result without a NULL check) — confirm,
+	 * or round allocation up to a power of two in dma_pool_create().
+	 */
+	return xa_load(&pool->pages, vaddr & ~(pool->allocation - 1));
+}
+
/**
* dma_pool_alloc - get a block of consistent memory
* @pool: dma pool that will produce the block
@@ -316,16 +328,16 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
{
unsigned long flags;
struct dma_page *page;
- unsigned long i;
size_t offset;
void *retval;
might_alloc(mem_flags);
spin_lock_irqsave(&pool->lock, flags);
- xa_for_each(&pool->pages, i, page) {
- if (page->offset < pool->allocation)
- goto ready;
+ retval = pool->next_block;
+ if (retval) {
+ page = pool_find_page(pool, (unsigned long)retval);
+ goto ready;
}
/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
@@ -335,21 +347,44 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
	if (!page)
		return NULL;
-	xa_store(&pool->pages, page->vaddr, page, mem_flags);
+	/*
+	 * NOTE(review): xa_store() can fail with -ENOMEM; an unchecked
+	 * failure leaves the page invisible to pool_find_page(), so a
+	 * later dma_pool_free() of a block from this page is rejected.
+	 * Check xa_is_err() and unwind the page allocation on failure.
+	 */
+	xa_store(&pool->pages, (unsigned long)page->vaddr, page,
+		 mem_flags);
+
	spin_lock_irqsave(&pool->lock, flags);
+	/*
+	 * pool_initialise_page() NULL-terminated the new page's chain.
+	 * Blocks may have been freed while the lock was dropped; splice
+	 * them onto the chain's real tail instead of writing through
+	 * (vaddr + allocation - size), which is not in general the last
+	 * block (boundary skips) and would both orphan those freed
+	 * blocks and scribble on poisoned free-block interiors.
+	 */
+	retval = page->vaddr;
+	if (pool->next_block) {
+		void **tail = retval;
+
+		while (*tail)
+			tail = *tail;
+		*tail = pool->next_block;
+	}
+	pool->next_block = retval;
ready:
page->in_use++;
- offset = page->offset;
- page->offset = *(int *)(page->vaddr + offset);
- retval = offset + page->vaddr;
+ pool->next_block = *(void **)retval;
+ offset = retval - page->vaddr;
*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
{
int i;
u8 *data = retval;
- /* page->offset is stored in first 4 bytes */
- for (i = sizeof(page->offset); i < pool->size; i++) {
+ /* next block link is stored in first pointer bytes */
+ for (i = sizeof(void *); i < pool->size; i++) {
if (data[i] == POOL_POISON_FREED)
continue;
if (pool->dev)
@@ -380,11 +397,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
}
EXPORT_SYMBOL(dma_pool_alloc);
-static struct dma_page *pool_find_page(struct dma_pool *pool, unsigned long vaddr)
-{
- return xa_load(pool->pages, vaddr & ~(pool->allocation - 1));
-}
-
/**
* dma_pool_free - put block back into dma pool
* @pool: the dma pool holding the block
@@ -401,7 +413,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
unsigned int offset;
spin_lock_irqsave(&pool->lock, flags);
- page = pool_find_page(pool, vaddr);
+ page = pool_find_page(pool, (unsigned long)vaddr);
if (!page) {
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
@@ -428,10 +440,10 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
return;
}
{
- unsigned int chain = page->offset;
- while (chain < pool->allocation) {
- if (chain != offset) {
- chain = *(int *)(page->vaddr + chain);
+ void *v = pool->next_block;
+ while (v) {
+ if (v != vaddr) {
+ v = *(void **)v;
continue;
}
spin_unlock_irqrestore(&pool->lock, flags);
@@ -448,8 +460,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
#endif
page->in_use--;
- *(int *)vaddr = page->offset;
- page->offset = offset;
+ *(void **)vaddr = pool->next_block;
+ pool->next_block = vaddr;
/*
* Resist a temptation to do
* if (!is_page_busy(page)) pool_free_page(pool, page);