[for-6.12] ALSA: memalloc: Drop Xen PV workaround again

Message ID 20240910113100.32542-1-tiwai@suse.de (mailing list archive)
State New

Commit Message

Takashi Iwai Sept. 10, 2024, 11:30 a.m. UTC
The recent commit e469e2045f1b ("ALSA: memalloc: Let IOMMU handle
S/G primarily") changed the SG buffer allocation code to use the
standard DMA API primarily and to apply the fallback only in limited
cases.  This made the Xen PV specific workaround we took in commit
53466ebdec61 ("ALSA: memalloc: Workaround for Xen PV") superfluous.
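
To picture the ordering described above, here is a simplified sketch
(not the kernel code verbatim; try_standard_dma_sg() is an
illustrative stand-in for the standard DMA API path):

/* Simplified sketch of the S/G allocation order after e469e2045f1b:
 * the standard DMA API path (letting the IOMMU build the S/G list)
 * is tried first; the x86 fallback runs only when that path fails.
 * try_standard_dma_sg() is an illustrative stand-in, not a real symbol.
 */
static void *sg_alloc_sketch(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = try_standard_dma_sg(dmab, size);	/* primary path */
	if (p)
		return p;

	/* limited fallback: allocate page chunks manually */
	return snd_dma_sg_fallback_alloc(dmab, size);
}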

It was a hackish workaround for the regression at that time, and it
seems to be causing other issues now (reportedly memory corruption).
So it's better to clean it up, after all.

Link: https://lore.kernel.org/20240906184209.25423-1-ariadne@ariadne.space
Cc: Ariadne Conill <ariadne@ariadne.space>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
---
 sound/core/memalloc.c | 19 +++----------------
 1 file changed, 3 insertions(+), 16 deletions(-)


base-commit: 7e4d4b32ab9532bd1babcd5d0763d727ebb04be0

Patch

diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f3ad9f85adf1..1d931c24fa35 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -657,7 +657,6 @@  static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 #ifdef CONFIG_SND_DMA_SGBUF
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
-	bool use_dma_alloc_coherent;
 	size_t count;
 	struct page **pages;
 	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
@@ -677,13 +676,8 @@  static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
 			size = sgbuf->addrs[i] & ~PAGE_MASK;
 			if (WARN_ON(!size))
 				break;
-			if (sgbuf->use_dma_alloc_coherent)
-				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
-						  page_address(sgbuf->pages[i]),
-						  sgbuf->addrs[i] & PAGE_MASK);
-			else
-				do_free_pages(page_address(sgbuf->pages[i]),
-					      size << PAGE_SHIFT, false);
+			do_free_pages(page_address(sgbuf->pages[i]),
+				      size << PAGE_SHIFT, false);
 			i += size;
 		}
 	}
@@ -705,7 +699,6 @@  static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
 	if (!sgbuf)
 		return NULL;
-	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
 	size = PAGE_ALIGN(size);
 	sgbuf->count = size >> PAGE_SHIFT;
 	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
@@ -718,10 +711,7 @@  static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
 	while (size > 0) {
 		chunk = min(size, chunk);
-		if (sgbuf->use_dma_alloc_coherent)
-			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
-		else
-			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
+		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
 		if (!p) {
 			if (chunk <= PAGE_SIZE)
 				goto error;
@@ -793,9 +783,6 @@  static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
 	int type = dmab->dev.type;
 	void *p;
 
-	if (cpu_feature_enabled(X86_FEATURE_XENPV))
-		return snd_dma_sg_fallback_alloc(dmab, size);
-
 	/* try the standard DMA API allocation at first */
 	if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
 		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
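
One detail worth keeping in mind when reading the hunks above: the
fallback stores each chunk's page count in the low bits of its DMA
address entry (the addresses are page aligned, so those bits are
otherwise zero), which is what the addrs[i] & ~PAGE_MASK and
addrs[i] & PAGE_MASK expressions decode.  A rough sketch of that
encoding; the helper names are illustrative only, not kernel symbols:

#include <linux/mm.h>		/* PAGE_MASK, PAGE_SHIFT */
#include <linux/types.h>	/* dma_addr_t */

/* Pack a page-aligned DMA address and a chunk page count into one word.
 * The count must fit in the low PAGE_SHIFT bits, which is why the
 * allocator caps a chunk at (PAGE_SIZE - 1) << PAGE_SHIFT bytes.
 */
static inline dma_addr_t sg_pack(dma_addr_t addr, size_t npages)
{
	return (addr & PAGE_MASK) | (npages & ~PAGE_MASK);
}

static inline dma_addr_t sg_unpack_addr(dma_addr_t v)
{
	return v & PAGE_MASK;		/* the real DMA address */
}

static inline size_t sg_unpack_pages(dma_addr_t v)
{
	return v & ~PAGE_MASK;		/* the chunk's page count */
}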