[2/2] xen/swiotlb: don't destroy contiguous region in all cases

Message ID 20250211120432.29493-3-jgross@suse.com (mailing list archive)
State New
Series xen/swiotlb: one fix and one optimization

Commit Message

Juergen Gross Feb. 11, 2025, 12:04 p.m. UTC
In case xen_swiotlb_alloc_coherent() needed to create a contiguous
region only for a reason other than the memory not being compliant with
the device's DMA mask, there is no reason why this contiguous region
should be destroyed by xen_swiotlb_free_coherent() later. Destroying
this region should be done only if the memory of the region was
allocated with more stringent placement requirements than the memory
it replaced.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/x86/include/asm/xen/swiotlb-xen.h |  5 +++--
 arch/x86/xen/mmu_pv.c                  | 18 ++++++++++++------
 drivers/xen/swiotlb-xen.c              | 11 +++++++----
 3 files changed, 22 insertions(+), 12 deletions(-)

Comments

Stefano Stabellini Feb. 12, 2025, 1:30 a.m. UTC | #1
On Tue, 11 Feb 2025, Juergen Gross wrote:
> In case xen_swiotlb_alloc_coherent() needed to create a contiguous
> region only for other reason than the memory not being compliant with
> the device's DMA mask, there is no reason why this contiguous region
> should be destroyed by xen_swiotlb_free_coherent() later. Destroying
> this region should be done only, if the memory of the region was
> allocated with more stringent placement requirements than the memory
> it did replace.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
>  arch/x86/include/asm/xen/swiotlb-xen.h |  5 +++--
>  arch/x86/xen/mmu_pv.c                  | 18 ++++++++++++------
>  drivers/xen/swiotlb-xen.c              | 11 +++++++----
>  3 files changed, 22 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
> index abde0f44df57..a353f20c7e79 100644
> --- a/arch/x86/include/asm/xen/swiotlb-xen.h
> +++ b/arch/x86/include/asm/xen/swiotlb-xen.h
> @@ -4,8 +4,9 @@
>  
>  int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
>  int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
> -				unsigned int address_bits,
> -				dma_addr_t *dma_handle);
> +				 unsigned int address_bits,
> +				 dma_addr_t *dma_handle,
> +				 unsigned int *address_bits_in);
>  void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
>  
>  #endif /* _ASM_X86_SWIOTLB_XEN_H */
> diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
> index 2c70cd35e72c..fb586238f7c4 100644
> --- a/arch/x86/xen/mmu_pv.c
> +++ b/arch/x86/xen/mmu_pv.c
> @@ -2208,19 +2208,22 @@ void __init xen_init_mmu_ops(void)
>  static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
>  
>  #define VOID_PTE (mfn_pte(0, __pgprot(0)))
> -static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
> -				unsigned long *in_frames,
> -				unsigned long *out_frames)
> +static int xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
> +			     unsigned long *in_frames,
> +			     unsigned long *out_frames)
>  {
>  	int i;
> +	u64 address_bits = 0;
>  	struct multicall_space mcs;
>  
>  	xen_mc_batch();
>  	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
>  		mcs = __xen_mc_entry(0);
>  
> -		if (in_frames)
> +		if (in_frames) {
>  			in_frames[i] = virt_to_mfn((void *)vaddr);
> +			address_bits |= in_frames[i] << PAGE_SHIFT;
> +		}
>  
>  		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
>  		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
> @@ -2229,6 +2232,8 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>  			out_frames[i] = virt_to_pfn((void *)vaddr);
>  	}
>  	xen_mc_issue(0);
> +
> +	return fls64(address_bits);
>  }
>  
>  /*
> @@ -2321,7 +2326,8 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
>  
>  int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>  				 unsigned int address_bits,
> -				 dma_addr_t *dma_handle)
> +				 dma_addr_t *dma_handle,
> +				 unsigned int *address_bits_in)
>  {
>  	unsigned long *in_frames = discontig_frames, out_frame;
>  	unsigned long  flags;
> @@ -2336,7 +2342,7 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>  	spin_lock_irqsave(&xen_reservation_lock, flags);
>  
>  	/* 1. Zap current PTEs, remembering MFNs. */
> -	xen_zap_pfn_range(vstart, order, in_frames, NULL);
> +	*address_bits_in = xen_zap_pfn_range(vstart, order, in_frames, NULL);
>  
>  	/* 2. Get a new contiguous memory extent. */
>  	out_frame = virt_to_pfn((void *)vstart);
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index 26c62e0d34e9..3f3724f53914 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -118,6 +118,7 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
>  	int rc;
>  	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
>  	unsigned int i, dma_bits = order + PAGE_SHIFT;
> +	unsigned int dummy;
>  	dma_addr_t dma_handle;
>  	phys_addr_t p = virt_to_phys(buf);
>  
> @@ -129,7 +130,7 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
>  		do {
>  			rc = xen_create_contiguous_region(
>  				p + (i << IO_TLB_SHIFT), order,
> -				dma_bits, &dma_handle);
> +				dma_bits, &dma_handle, &dummy);
>  		} while (rc && dma_bits++ < MAX_DMA_BITS);
>  		if (rc)
>  			return rc;
> @@ -144,6 +145,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
>  		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
>  {
>  	u64 dma_mask = dev->coherent_dma_mask;
> +	unsigned int address_bits = fls64(dma_mask), address_bits_in;
>  	int order = get_order(size);
>  	phys_addr_t phys;
>  	void *ret;
> @@ -160,10 +162,11 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
>  	if (*dma_handle + size - 1 > dma_mask ||
>  	    range_straddles_page_boundary(phys, size) ||
>  	    range_requires_alignment(phys, size)) {
> -		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
> -				dma_handle) != 0)
> +		if (xen_create_contiguous_region(phys, order, address_bits,
> +						 dma_handle, &address_bits_in))
>  			goto out_free_pages;
> -		SetPageXenRemapped(virt_to_page(ret));
> +		if (address_bits_in > address_bits)
> +			SetPageXenRemapped(virt_to_page(ret));

This has the unfortunate side effect of making "PageXenRemapped"
unreliable as an indicator of whether a page has been remapped. A page
could still be remapped without the "PageXenRemapped" bit being set.  

I recommend adding an in-code comment to clarify this behavior.
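
One possible shape for such a comment, sketched against the hunk above
(wording is the editor's illustration, not part of the posted patch):

		/*
		 * Note that a page may be remapped even when PageXenRemapped
		 * is not set: the flag only records whether
		 * xen_destroy_contiguous_region() has to be called on free,
		 * which is unnecessary when the replacement pages were no
		 * more restricted than the original ones.
		 */
		if (address_bits_in > address_bits)
			SetPageXenRemapped(virt_to_page(ret));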



>  	}
>  
>  	memset(ret, 0, size);
> -- 
> 2.43.0
>
Jan Beulich Feb. 12, 2025, 7:38 a.m. UTC | #2
On 11.02.2025 13:04, Juergen Gross wrote:
> In case xen_swiotlb_alloc_coherent() needed to create a contiguous
> region only for other reason than the memory not being compliant with
> the device's DMA mask, there is no reason why this contiguous region
> should be destroyed by xen_swiotlb_free_coherent() later. Destroying
> this region should be done only, if the memory of the region was
> allocated with more stringent placement requirements than the memory
> it did replace.

I'm not convinced of this: Even the mere property of being contiguous
may already be enough to warrant freeing when possible. The hypervisor
may not have that many contiguous areas available. The bigger the
chunk, the more important to give it back once no longer needed in
this shape.

Plus also take into account how Xen behaves here: It specifically tries
to hold back, during boot, lower addressed memory to later satisfy such
requests. Hence even if you don't ask for address restricted memory,
you may get back such. You'd need to compare input and output addresses,
not input addresses and requested restriction, to alleviate this.
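
A rough sketch of that comparison in xen_swiotlb_alloc_coherent() (editor's
illustration of the suggestion; address_bits_out is a hypothetical local and
this is not code from the thread):

		if (xen_create_contiguous_region(phys, order, address_bits,
						 dma_handle, &address_bits_in))
			goto out_free_pages;
		/*
		 * Mark the page for later destruction only when the
		 * replacement region ended up in lower (more constrained)
		 * memory than the pages it replaced.
		 */
		address_bits_out = fls64(*dma_handle + size - 1);
		if (address_bits_out < address_bits_in)
			SetPageXenRemapped(virt_to_page(ret));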

> --- a/arch/x86/xen/mmu_pv.c
> +++ b/arch/x86/xen/mmu_pv.c
> @@ -2208,19 +2208,22 @@ void __init xen_init_mmu_ops(void)
>  static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
>  
>  #define VOID_PTE (mfn_pte(0, __pgprot(0)))
> -static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
> -				unsigned long *in_frames,
> -				unsigned long *out_frames)
> +static int xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
> +			     unsigned long *in_frames,
> +			     unsigned long *out_frames)
>  {
>  	int i;
> +	u64 address_bits = 0;

First I was inclined to suggest to use paddr_t here, but ...

>  	struct multicall_space mcs;
>  
>  	xen_mc_batch();
>  	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
>  		mcs = __xen_mc_entry(0);
>  
> -		if (in_frames)
> +		if (in_frames) {
>  			in_frames[i] = virt_to_mfn((void *)vaddr);
> +			address_bits |= in_frames[i] << PAGE_SHIFT;

... why do a shift on every loop iteration when you can ...

> +		}
>  
>  		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
>  		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
> @@ -2229,6 +2232,8 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>  			out_frames[i] = virt_to_pfn((void *)vaddr);
>  	}
>  	xen_mc_issue(0);
> +
> +	return fls64(address_bits);

... simply add in PAGE_SHIFT here, once?

> @@ -2321,7 +2326,8 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
>  
>  int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>  				 unsigned int address_bits,
> -				 dma_addr_t *dma_handle)
> +				 dma_addr_t *dma_handle,
> +				 unsigned int *address_bits_in)
>  {
>  	unsigned long *in_frames = discontig_frames, out_frame;
>  	unsigned long  flags;
> @@ -2336,7 +2342,7 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>  	spin_lock_irqsave(&xen_reservation_lock, flags);
>  
>  	/* 1. Zap current PTEs, remembering MFNs. */
> -	xen_zap_pfn_range(vstart, order, in_frames, NULL);
> +	*address_bits_in = xen_zap_pfn_range(vstart, order, in_frames, NULL);

Nit: Converting plain int to unsigned int, when there's no real reason
to do any conversion. Since xen_zap_pfn_range() can't return a negative
value for the caller caring about the return value (yet more obviously
so with the suggested adjustment, and then true for both callers), the
function could easily return unsigned int.

Jan
Juergen Gross Feb. 12, 2025, 11:11 a.m. UTC | #3
On 12.02.25 08:38, Jan Beulich wrote:
> On 11.02.2025 13:04, Juergen Gross wrote:
>> In case xen_swiotlb_alloc_coherent() needed to create a contiguous
>> region only for other reason than the memory not being compliant with
>> the device's DMA mask, there is no reason why this contiguous region
>> should be destroyed by xen_swiotlb_free_coherent() later. Destroying
>> this region should be done only, if the memory of the region was
>> allocated with more stringent placement requirements than the memory
>> it did replace.
> 
> I'm not convinced of this: Even the mere property of being contiguous
> may already be enough to warrant freeing when possible. The hypervisor
> may not have that many contiguous areas available. The bigger the
> chunk, the more important to give it back once no longer needed in
> this shape.

Really? When creating a domain Xen tries to use GB pages and 2MB pages as
much as possible. Why would this special case here have more restrictions?

> Plus also take into account how Xen behaves here: It specifically tries
> to hold back, during boot, lower addressed memory to later satisfy such
> requests. Hence even if you don't ask for address restricted memory,
> you may get back such. You'd need to compare input and output addresses,
> not input addresses and requested restriction to alleviate this.

Fair enough.

> 
>> --- a/arch/x86/xen/mmu_pv.c
>> +++ b/arch/x86/xen/mmu_pv.c
>> @@ -2208,19 +2208,22 @@ void __init xen_init_mmu_ops(void)
>>   static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
>>   
>>   #define VOID_PTE (mfn_pte(0, __pgprot(0)))
>> -static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>> -				unsigned long *in_frames,
>> -				unsigned long *out_frames)
>> +static int xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>> +			     unsigned long *in_frames,
>> +			     unsigned long *out_frames)
>>   {
>>   	int i;
>> +	u64 address_bits = 0;
> 
> First I was inclined to suggest to use paddr_t here, but ...
> 
>>   	struct multicall_space mcs;
>>   
>>   	xen_mc_batch();
>>   	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
>>   		mcs = __xen_mc_entry(0);
>>   
>> -		if (in_frames)
>> +		if (in_frames) {
>>   			in_frames[i] = virt_to_mfn((void *)vaddr);
>> +			address_bits |= in_frames[i] << PAGE_SHIFT;
> 
> ... why do a shift on every loop iteration when you can ...
> 
>> +		}
>>   
>>   		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
>>   		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
>> @@ -2229,6 +2232,8 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>>   			out_frames[i] = virt_to_pfn((void *)vaddr);
>>   	}
>>   	xen_mc_issue(0);
>> +
>> +	return fls64(address_bits);
> 
> ... simply add in PAGE_SHIFT here, once?

True.

> 
>> @@ -2321,7 +2326,8 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
>>   
>>   int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>>   				 unsigned int address_bits,
>> -				 dma_addr_t *dma_handle)
>> +				 dma_addr_t *dma_handle,
>> +				 unsigned int *address_bits_in)
>>   {
>>   	unsigned long *in_frames = discontig_frames, out_frame;
>>   	unsigned long  flags;
>> @@ -2336,7 +2342,7 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>>   	spin_lock_irqsave(&xen_reservation_lock, flags);
>>   
>>   	/* 1. Zap current PTEs, remembering MFNs. */
>> -	xen_zap_pfn_range(vstart, order, in_frames, NULL);
>> +	*address_bits_in = xen_zap_pfn_range(vstart, order, in_frames, NULL);
> 
> Nit: Converting plain int to unsigned int, when there's no real reason
> to do any conversion. Since xen_zap_pfn_range() can't return a negative
> value for the caller caring about the return value (yet more obviously
> so with the suggested adjustment, and then true for both callers), the
> function could easily return unsigned int.

Will change that.
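
[For reference, a minimal sketch of the helper with both adjustments applied
(OR in the raw MFNs, add PAGE_SHIFT once after the loop, return unsigned int);
illustrative only, not the actual follow-up patch:]

static unsigned int xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
				      unsigned long *in_frames,
				      unsigned long *out_frames)
{
	int i;
	u64 mfns = 0;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL << order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames) {
			in_frames[i] = virt_to_mfn((void *)vaddr);
			mfns |= in_frames[i];	/* no shift inside the loop */
		}

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn((void *)vaddr);
	}
	xen_mc_issue(0);

	/* Shift once here; callers passing out_frames ignore the result. */
	return fls64(mfns) + PAGE_SHIFT;
}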


Juergen
Juergen Gross Feb. 12, 2025, 11:15 a.m. UTC | #4
On 12.02.25 02:30, Stefano Stabellini wrote:
> On Tue, 11 Feb 2025, Juergen Gross wrote:
>> In case xen_swiotlb_alloc_coherent() needed to create a contiguous
>> region only for other reason than the memory not being compliant with
>> the device's DMA mask, there is no reason why this contiguous region
>> should be destroyed by xen_swiotlb_free_coherent() later. Destroying
>> this region should be done only, if the memory of the region was
>> allocated with more stringent placement requirements than the memory
>> it did replace.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>>   arch/x86/include/asm/xen/swiotlb-xen.h |  5 +++--
>>   arch/x86/xen/mmu_pv.c                  | 18 ++++++++++++------
>>   drivers/xen/swiotlb-xen.c              | 11 +++++++----
>>   3 files changed, 22 insertions(+), 12 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
>> index abde0f44df57..a353f20c7e79 100644
>> --- a/arch/x86/include/asm/xen/swiotlb-xen.h
>> +++ b/arch/x86/include/asm/xen/swiotlb-xen.h
>> @@ -4,8 +4,9 @@
>>   
>>   int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
>>   int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>> -				unsigned int address_bits,
>> -				dma_addr_t *dma_handle);
>> +				 unsigned int address_bits,
>> +				 dma_addr_t *dma_handle,
>> +				 unsigned int *address_bits_in);
>>   void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
>>   
>>   #endif /* _ASM_X86_SWIOTLB_XEN_H */
>> diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
>> index 2c70cd35e72c..fb586238f7c4 100644
>> --- a/arch/x86/xen/mmu_pv.c
>> +++ b/arch/x86/xen/mmu_pv.c
>> @@ -2208,19 +2208,22 @@ void __init xen_init_mmu_ops(void)
>>   static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
>>   
>>   #define VOID_PTE (mfn_pte(0, __pgprot(0)))
>> -static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>> -				unsigned long *in_frames,
>> -				unsigned long *out_frames)
>> +static int xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>> +			     unsigned long *in_frames,
>> +			     unsigned long *out_frames)
>>   {
>>   	int i;
>> +	u64 address_bits = 0;
>>   	struct multicall_space mcs;
>>   
>>   	xen_mc_batch();
>>   	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
>>   		mcs = __xen_mc_entry(0);
>>   
>> -		if (in_frames)
>> +		if (in_frames) {
>>   			in_frames[i] = virt_to_mfn((void *)vaddr);
>> +			address_bits |= in_frames[i] << PAGE_SHIFT;
>> +		}
>>   
>>   		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
>>   		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
>> @@ -2229,6 +2232,8 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
>>   			out_frames[i] = virt_to_pfn((void *)vaddr);
>>   	}
>>   	xen_mc_issue(0);
>> +
>> +	return fls64(address_bits);
>>   }
>>   
>>   /*
>> @@ -2321,7 +2326,8 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
>>   
>>   int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>>   				 unsigned int address_bits,
>> -				 dma_addr_t *dma_handle)
>> +				 dma_addr_t *dma_handle,
>> +				 unsigned int *address_bits_in)
>>   {
>>   	unsigned long *in_frames = discontig_frames, out_frame;
>>   	unsigned long  flags;
>> @@ -2336,7 +2342,7 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
>>   	spin_lock_irqsave(&xen_reservation_lock, flags);
>>   
>>   	/* 1. Zap current PTEs, remembering MFNs. */
>> -	xen_zap_pfn_range(vstart, order, in_frames, NULL);
>> +	*address_bits_in = xen_zap_pfn_range(vstart, order, in_frames, NULL);
>>   
>>   	/* 2. Get a new contiguous memory extent. */
>>   	out_frame = virt_to_pfn((void *)vstart);
>> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
>> index 26c62e0d34e9..3f3724f53914 100644
>> --- a/drivers/xen/swiotlb-xen.c
>> +++ b/drivers/xen/swiotlb-xen.c
>> @@ -118,6 +118,7 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
>>   	int rc;
>>   	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
>>   	unsigned int i, dma_bits = order + PAGE_SHIFT;
>> +	unsigned int dummy;
>>   	dma_addr_t dma_handle;
>>   	phys_addr_t p = virt_to_phys(buf);
>>   
>> @@ -129,7 +130,7 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
>>   		do {
>>   			rc = xen_create_contiguous_region(
>>   				p + (i << IO_TLB_SHIFT), order,
>> -				dma_bits, &dma_handle);
>> +				dma_bits, &dma_handle, &dummy);
>>   		} while (rc && dma_bits++ < MAX_DMA_BITS);
>>   		if (rc)
>>   			return rc;
>> @@ -144,6 +145,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
>>   		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
>>   {
>>   	u64 dma_mask = dev->coherent_dma_mask;
>> +	unsigned int address_bits = fls64(dma_mask), address_bits_in;
>>   	int order = get_order(size);
>>   	phys_addr_t phys;
>>   	void *ret;
>> @@ -160,10 +162,11 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
>>   	if (*dma_handle + size - 1 > dma_mask ||
>>   	    range_straddles_page_boundary(phys, size) ||
>>   	    range_requires_alignment(phys, size)) {
>> -		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
>> -				dma_handle) != 0)
>> +		if (xen_create_contiguous_region(phys, order, address_bits,
>> +						 dma_handle, &address_bits_in))
>>   			goto out_free_pages;
>> -		SetPageXenRemapped(virt_to_page(ret));
>> +		if (address_bits_in > address_bits)
>> +			SetPageXenRemapped(virt_to_page(ret));
> 
> This has the unfortunate side effect of making "PageXenRemapped"
> unreliable as an indicator of whether a page has been remapped. A page
> could still be remapped without the "PageXenRemapped" bit being set.
> 
> I recommend adding an in-code comment to clarify this behavior.

The PageXenRemapped bit is used only for determining whether
xen_destroy_contiguous_region() should be called. By not setting the bit
I'm avoiding the call to xen_destroy_contiguous_region() later. So I don't
see any unfortunate side effect.
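
[For context, the only consumer of the flag is the free path, which looks
roughly like this in drivers/xen/swiotlb-xen.c (simplified excerpt):]

	/* xen_swiotlb_free_coherent(): */
	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);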


Juergen
Jan Beulich Feb. 12, 2025, 11:49 a.m. UTC | #5
On 12.02.2025 12:11, Jürgen Groß wrote:
> On 12.02.25 08:38, Jan Beulich wrote:
>> On 11.02.2025 13:04, Juergen Gross wrote:
>>> In case xen_swiotlb_alloc_coherent() needed to create a contiguous
>>> region only for other reason than the memory not being compliant with
>>> the device's DMA mask, there is no reason why this contiguous region
>>> should be destroyed by xen_swiotlb_free_coherent() later. Destroying
>>> this region should be done only, if the memory of the region was
>>> allocated with more stringent placement requirements than the memory
>>> it did replace.
>>
>> I'm not convinced of this: Even the mere property of being contiguous
>> may already be enough to warrant freeing when possible. The hypervisor
>> may not have that many contiguous areas available. The bigger the
>> chunk, the more important to give it back once no longer needed in
>> this shape.
> 
> Really? When creating a domain Xen tries to use GB pages and 2MB pages as
> much as possible. Why would this special case here have more restrictions?

There aren't many Gb pages to be had from the space below 4Gb; frequently
there'll be at most one (covering the range from 1 to 2 Gb). For that as
well as 2Mb ones I think it is a mistake that Xen may hand them out, when
the caller could fall back to 4k allocations. The thing is that, without extra
information, it's hard to come up with a good heuristic to decide whether
the caller is capable of falling back. Perhaps e.g. populate_physmap()
should add MEMF_no_dma to higher-order allocation requests targeting other
than the current domain, or when !d->creation_finished.
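
[A very rough sketch of that idea on the Xen side; the condition and field
names are assumptions made for illustration, not a proposed hypervisor patch:]

	/*
	 * In populate_physmap(): avoid handing out scarce DMA-capable memory
	 * for higher-order requests whose caller can presumably fall back to
	 * 4k allocations.
	 */
	if ( a->extent_order &&
	     (a->domain != current->domain || !a->domain->creation_finished) )
		a->memflags |= MEMF_no_dma;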

Jan
Stefano Stabellini Feb. 12, 2025, 9:14 p.m. UTC | #6
On Wed, 12 Feb 2025, Jürgen Groß wrote:
> On 12.02.25 02:30, Stefano Stabellini wrote:
> > On Tue, 11 Feb 2025, Juergen Gross wrote:
> > > In case xen_swiotlb_alloc_coherent() needed to create a contiguous
> > > region only for other reason than the memory not being compliant with
> > > the device's DMA mask, there is no reason why this contiguous region
> > > should be destroyed by xen_swiotlb_free_coherent() later. Destroying
> > > this region should be done only, if the memory of the region was
> > > allocated with more stringent placement requirements than the memory
> > > it did replace.
> > > 
> > > Signed-off-by: Juergen Gross <jgross@suse.com>
> > > ---
> > >   arch/x86/include/asm/xen/swiotlb-xen.h |  5 +++--
> > >   arch/x86/xen/mmu_pv.c                  | 18 ++++++++++++------
> > >   drivers/xen/swiotlb-xen.c              | 11 +++++++----
> > >   3 files changed, 22 insertions(+), 12 deletions(-)
> > > 
> > > diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h
> > > b/arch/x86/include/asm/xen/swiotlb-xen.h
> > > index abde0f44df57..a353f20c7e79 100644
> > > --- a/arch/x86/include/asm/xen/swiotlb-xen.h
> > > +++ b/arch/x86/include/asm/xen/swiotlb-xen.h
> > > @@ -4,8 +4,9 @@
> > >     int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
> > >   int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
> > > -				unsigned int address_bits,
> > > -				dma_addr_t *dma_handle);
> > > +				 unsigned int address_bits,
> > > +				 dma_addr_t *dma_handle,
> > > +				 unsigned int *address_bits_in);
> > >   void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int
> > > order);
> > >     #endif /* _ASM_X86_SWIOTLB_XEN_H */
> > > diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
> > > index 2c70cd35e72c..fb586238f7c4 100644
> > > --- a/arch/x86/xen/mmu_pv.c
> > > +++ b/arch/x86/xen/mmu_pv.c
> > > @@ -2208,19 +2208,22 @@ void __init xen_init_mmu_ops(void)
> > >   static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
> > >     #define VOID_PTE (mfn_pte(0, __pgprot(0)))
> > > -static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
> > > -				unsigned long *in_frames,
> > > -				unsigned long *out_frames)
> > > +static int xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
> > > +			     unsigned long *in_frames,
> > > +			     unsigned long *out_frames)
> > >   {
> > >   	int i;
> > > +	u64 address_bits = 0;
> > >   	struct multicall_space mcs;
> > >     	xen_mc_batch();
> > >   	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
> > >   		mcs = __xen_mc_entry(0);
> > >   -		if (in_frames)
> > > +		if (in_frames) {
> > >   			in_frames[i] = virt_to_mfn((void *)vaddr);
> > > +			address_bits |= in_frames[i] << PAGE_SHIFT;
> > > +		}
> > >     		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
> > >   		__set_phys_to_machine(virt_to_pfn((void *)vaddr),
> > > INVALID_P2M_ENTRY);
> > > @@ -2229,6 +2232,8 @@ static void xen_zap_pfn_range(unsigned long vaddr,
> > > unsigned int order,
> > >   			out_frames[i] = virt_to_pfn((void *)vaddr);
> > >   	}
> > >   	xen_mc_issue(0);
> > > +
> > > +	return fls64(address_bits);
> > >   }
> > >     /*
> > > @@ -2321,7 +2326,8 @@ static int xen_exchange_memory(unsigned long
> > > extents_in, unsigned int order_in,
> > >     int xen_create_contiguous_region(phys_addr_t pstart, unsigned int
> > > order,
> > >   				 unsigned int address_bits,
> > > -				 dma_addr_t *dma_handle)
> > > +				 dma_addr_t *dma_handle,
> > > +				 unsigned int *address_bits_in)
> > >   {
> > >   	unsigned long *in_frames = discontig_frames, out_frame;
> > >   	unsigned long  flags;
> > > @@ -2336,7 +2342,7 @@ int xen_create_contiguous_region(phys_addr_t pstart,
> > > unsigned int order,
> > >   	spin_lock_irqsave(&xen_reservation_lock, flags);
> > >     	/* 1. Zap current PTEs, remembering MFNs. */
> > > -	xen_zap_pfn_range(vstart, order, in_frames, NULL);
> > > +	*address_bits_in = xen_zap_pfn_range(vstart, order, in_frames, NULL);
> > >     	/* 2. Get a new contiguous memory extent. */
> > >   	out_frame = virt_to_pfn((void *)vstart);
> > > diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> > > index 26c62e0d34e9..3f3724f53914 100644
> > > --- a/drivers/xen/swiotlb-xen.c
> > > +++ b/drivers/xen/swiotlb-xen.c
> > > @@ -118,6 +118,7 @@ int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
> > >   	int rc;
> > >   	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
> > >   	unsigned int i, dma_bits = order + PAGE_SHIFT;
> > > +	unsigned int dummy;
> > >   	dma_addr_t dma_handle;
> > >   	phys_addr_t p = virt_to_phys(buf);
> > >   @@ -129,7 +130,7 @@ int xen_swiotlb_fixup(void *buf, unsigned long
> > > nslabs)
> > >   		do {
> > >   			rc = xen_create_contiguous_region(
> > >   				p + (i << IO_TLB_SHIFT), order,
> > > -				dma_bits, &dma_handle);
> > > +				dma_bits, &dma_handle, &dummy);
> > >   		} while (rc && dma_bits++ < MAX_DMA_BITS);
> > >   		if (rc)
> > >   			return rc;
> > > @@ -144,6 +145,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t
> > > size,
> > >   		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
> > >   {
> > >   	u64 dma_mask = dev->coherent_dma_mask;
> > > +	unsigned int address_bits = fls64(dma_mask), address_bits_in;
> > >   	int order = get_order(size);
> > >   	phys_addr_t phys;
> > >   	void *ret;
> > > @@ -160,10 +162,11 @@ xen_swiotlb_alloc_coherent(struct device *dev,
> > > size_t size,
> > >   	if (*dma_handle + size - 1 > dma_mask ||
> > >   	    range_straddles_page_boundary(phys, size) ||
> > >   	    range_requires_alignment(phys, size)) {
> > > -		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
> > > -				dma_handle) != 0)
> > > +		if (xen_create_contiguous_region(phys, order, address_bits,
> > > +						 dma_handle,
> > > &address_bits_in))
> > >   			goto out_free_pages;
> > > -		SetPageXenRemapped(virt_to_page(ret));
> > > +		if (address_bits_in > address_bits)
> > > +			SetPageXenRemapped(virt_to_page(ret));
> > 
> > This has the unfortunate side effect of making "PageXenRemapped"
> > unreliable as an indicator of whether a page has been remapped. A page
> > could still be remapped without the "PageXenRemapped" bit being set.
> > 
> > I recommend adding an in-code comment to clarify this behavior.
> 
> The PageXenRemapped bit is used only for determining whether
> xen_destroy_contiguous_region() should be called. And by not setting the bit
> I'm avoiding to call xen_destroy_contiguous_region() later. So I don't see any
> unfortunate side effect.

While the purpose of PageXenRemapped is to determine whether
xen_destroy_contiguous_region() should be called for the region, the
name "PageXenRemapped" suggests more generically that the region is
remapped.

Without this patch, all the regions that are remapped have
PageXenRemapped set. The name matches its meaning. (Also,
xen_destroy_contiguous_region() is called on them.)

With this patch, not all the regions that are remapped have
PageXenRemapped set. The name does not match its meaning.

Patch

diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
index abde0f44df57..a353f20c7e79 100644
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -4,8 +4,9 @@ 
 
 int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
-				unsigned int address_bits,
-				dma_addr_t *dma_handle);
+				 unsigned int address_bits,
+				 dma_addr_t *dma_handle,
+				 unsigned int *address_bits_in);
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 
 #endif /* _ASM_X86_SWIOTLB_XEN_H */
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2c70cd35e72c..fb586238f7c4 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2208,19 +2208,22 @@  void __init xen_init_mmu_ops(void)
 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
 
 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
-static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
-				unsigned long *in_frames,
-				unsigned long *out_frames)
+static int xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+			     unsigned long *in_frames,
+			     unsigned long *out_frames)
 {
 	int i;
+	u64 address_bits = 0;
 	struct multicall_space mcs;
 
 	xen_mc_batch();
 	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
 		mcs = __xen_mc_entry(0);
 
-		if (in_frames)
+		if (in_frames) {
 			in_frames[i] = virt_to_mfn((void *)vaddr);
+			address_bits |= in_frames[i] << PAGE_SHIFT;
+		}
 
 		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
 		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
@@ -2229,6 +2232,8 @@  static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
 			out_frames[i] = virt_to_pfn((void *)vaddr);
 	}
 	xen_mc_issue(0);
+
+	return fls64(address_bits);
 }
 
 /*
@@ -2321,7 +2326,8 @@  static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 unsigned int address_bits,
-				 dma_addr_t *dma_handle)
+				 dma_addr_t *dma_handle,
+				 unsigned int *address_bits_in)
 {
 	unsigned long *in_frames = discontig_frames, out_frame;
 	unsigned long  flags;
@@ -2336,7 +2342,7 @@  int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 	spin_lock_irqsave(&xen_reservation_lock, flags);
 
 	/* 1. Zap current PTEs, remembering MFNs. */
-	xen_zap_pfn_range(vstart, order, in_frames, NULL);
+	*address_bits_in = xen_zap_pfn_range(vstart, order, in_frames, NULL);
 
 	/* 2. Get a new contiguous memory extent. */
 	out_frame = virt_to_pfn((void *)vstart);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 26c62e0d34e9..3f3724f53914 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -118,6 +118,7 @@  int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 	int rc;
 	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
 	unsigned int i, dma_bits = order + PAGE_SHIFT;
+	unsigned int dummy;
 	dma_addr_t dma_handle;
 	phys_addr_t p = virt_to_phys(buf);
 
@@ -129,7 +130,7 @@  int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 		do {
 			rc = xen_create_contiguous_region(
 				p + (i << IO_TLB_SHIFT), order,
-				dma_bits, &dma_handle);
+				dma_bits, &dma_handle, &dummy);
 		} while (rc && dma_bits++ < MAX_DMA_BITS);
 		if (rc)
 			return rc;
@@ -144,6 +145,7 @@  xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
 	u64 dma_mask = dev->coherent_dma_mask;
+	unsigned int address_bits = fls64(dma_mask), address_bits_in;
 	int order = get_order(size);
 	phys_addr_t phys;
 	void *ret;
@@ -160,10 +162,11 @@  xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
 	if (*dma_handle + size - 1 > dma_mask ||
 	    range_straddles_page_boundary(phys, size) ||
 	    range_requires_alignment(phys, size)) {
-		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
-				dma_handle) != 0)
+		if (xen_create_contiguous_region(phys, order, address_bits,
+						 dma_handle, &address_bits_in))
 			goto out_free_pages;
-		SetPageXenRemapped(virt_to_page(ret));
+		if (address_bits_in > address_bits)
+			SetPageXenRemapped(virt_to_page(ret));
 	}
 
 	memset(ret, 0, size);