--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,7 +33,6 @@
 
 struct cma cma_areas[MAX_CMA_AREAS];
 unsigned cma_area_count;
-static DEFINE_MUTEX(cma_mutex);
 
 phys_addr_t cma_get_base(const struct cma *cma)
 {
@@ -111,6 +110,7 @@ static void __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(pfn));
 
 	spin_lock_init(&cma->lock);
+	mutex_init(&cma->cma_mutex);
 
 #ifdef CONFIG_CMA_DEBUGFS
 	INIT_HLIST_HEAD(&cma->mem_head);
@@ -460,10 +460,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		mutex_lock(&cma_mutex);
+		mutex_lock(&cma->cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 					 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-		mutex_unlock(&cma_mutex);
+		mutex_unlock(&cma->cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,6 +16,7 @@ struct cma {
 	unsigned long *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	spinlock_t lock;
+	struct mutex cma_mutex;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
Concurrent cma_alloc() requests might race with each other inside the same
CMA heap area. Such races cannot happen between two different CMA areas, as
their pfn ranges are non-overlapping. Let's reduce the scope of 'cma_mutex'
from system wide down to a single CMA heap area, allowing concurrent
cma_alloc() requests in different CMA heap areas.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
This patch is only lightly tested; the main intent is to gather initial
feedback on whether the scope of this CMA mutex should be reduced in
principle.

 mm/cma.c | 6 +++---
 mm/cma.h | 1 +
 2 files changed, 4 insertions(+), 3 deletions(-)
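As a quick illustration of the intent (purely a user-space sketch, not kernel
code), the program below models per-area locking with one pthread mutex per
fake CMA area; struct fake_cma, fake_alloc_contig_range() and alloc_worker()
are made-up names used only for this example. Two threads allocating from
different areas proceed concurrently, while allocations targeting the same
area still serialize, mirroring the behaviour of the per-area cma_mutex
introduced above.

/*
 * User-space analogue of the per-area locking change. All names here are
 * hypothetical and exist only for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NR_AREAS 2

struct fake_cma {
	unsigned long base_pfn;
	pthread_mutex_t cma_mutex;	/* per-area, mirrors cma->cma_mutex */
};

static struct fake_cma areas[NR_AREAS];

/* Stand-in for alloc_contig_range(): sleep to simulate migration work. */
static int fake_alloc_contig_range(unsigned long pfn, unsigned long count)
{
	(void)pfn;
	(void)count;
	usleep(100 * 1000);
	return 0;
}

static void *alloc_worker(void *arg)
{
	struct fake_cma *cma = arg;

	/*
	 * With a single global mutex these workers would serialize even
	 * though they touch disjoint pfn ranges; with per-area mutexes they
	 * only serialize against allocations in the same area.
	 */
	pthread_mutex_lock(&cma->cma_mutex);
	fake_alloc_contig_range(cma->base_pfn, 32);
	pthread_mutex_unlock(&cma->cma_mutex);

	printf("allocated from area with base_pfn 0x%lx\n", cma->base_pfn);
	return NULL;
}

int main(void)
{
	pthread_t threads[NR_AREAS];
	int i;

	for (i = 0; i < NR_AREAS; i++) {
		areas[i].base_pfn = 0x80000UL + i * 0x10000UL;
		pthread_mutex_init(&areas[i].cma_mutex, NULL);
	}

	for (i = 0; i < NR_AREAS; i++)
		pthread_create(&threads[i], NULL, alloc_worker, &areas[i]);
	for (i = 0; i < NR_AREAS; i++)
		pthread_join(threads[i], NULL);

	return 0;
}

Build with something like "gcc -pthread sketch.c" (file name arbitrary); both
workers finish in roughly one sleep period instead of two, since they no
longer contend on a single global lock.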