[V2,2/2] xen/virtio: Convert PAGE_SIZE/PAGE_SHIFT/PFN_UP to Xen counterparts

Message ID 20221008151013.2537826-3-olekstysh@gmail.com (mailing list archive)
State Accepted
Commit a383dcb1cca8305497877119fba0a320f41fe853
Series Other misc fixes for Xen grant DMA-mapping layer

Commit Message

Oleksandr Tyshchenko Oct. 8, 2022, 3:10 p.m. UTC
From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

Currently, a grant ref is always based on the Xen page granularity
(4KB), and the guest commonly uses the same page granularity.
But the guest may use a different page granularity (e.g. 64KB).
So adapt the code to be able to deal with it.

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
---
Cc: Juergen Gross <jgross@suse.com>
Cc: Xenia Ragiadakou <burzalodowa@gmail.com>

Changes V1 -> V2:
   - update commit description
   - rebase
   - use xen_offset_in_page() in xen_grant_dma_map(unmap)_page()

Previous discussion is at:
https://lore.kernel.org/xen-devel/20221006120912.1948459-1-olekstysh@gmail.com/
---
 drivers/xen/grant-dma-ops.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
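
For reference, the conversion builds on the Xen-specific page helpers, which always work in the fixed 4KB Xen page granularity regardless of the guest's PAGE_SIZE. A minimal sketch of the relevant definitions (roughly as found in include/xen/page.h; the exact definitions there may differ):

  /* Xen pages are always 4KB, independent of the guest's PAGE_SIZE. */
  #define XEN_PAGE_SHIFT		12
  #define XEN_PAGE_SIZE		(1UL << XEN_PAGE_SHIFT)
  #define XEN_PAGE_MASK		(~(XEN_PAGE_SIZE - 1))

  /* Offset within a 4KB Xen page rather than within a guest page. */
  #define xen_offset_in_page(p)	((unsigned long)(p) & ~XEN_PAGE_MASK)

  /* Round an address or size down/up to 4KB Xen page frame numbers. */
  #define XEN_PFN_DOWN(x)		((x) >> XEN_PAGE_SHIFT)
  #define XEN_PFN_UP(x)		(((x) + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT)

With a 64KB guest page size, PFN_UP() and offset_in_page() count 64KB units, while each grant ref still covers 4KB, so the XEN_* variants are the ones that match the grant-table arithmetic.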

Comments

Stefano Stabellini Oct. 14, 2022, 12:24 a.m. UTC | #1
On Sat, 8 Oct 2022, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 
> Currently, a grant ref is always based on the Xen page granularity
> (4KB), and the guest commonly uses the same page granularity.
> But the guest may use a different page granularity (e.g. 64KB).
> So adapt the code to be able to deal with it.
> 
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

Acked-by: Stefano Stabellini <sstabellini@kernel.org>


> ---
> Cc: Juergen Gross <jgross@suse.com>
> Cc: Xenia Ragiadakou <burzalodowa@gmail.com>
> 
> Changes V1 -> V2:
>    - update commit description
>    - rebase
>    - use xen_offset_in_page() in xen_grant_dma_map(unmap)_page()
> 
> Previous discussion is at:
> https://lore.kernel.org/xen-devel/20221006120912.1948459-1-olekstysh@gmail.com/
> ---
>  drivers/xen/grant-dma-ops.c | 24 ++++++++++++------------
>  1 file changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
> index 1d018e3a68a0..aff0f95ed954 100644
> --- a/drivers/xen/grant-dma-ops.c
> +++ b/drivers/xen/grant-dma-ops.c
> @@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
>  
>  static inline dma_addr_t grant_to_dma(grant_ref_t grant)
>  {
> -	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
> +	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
>  }
>  
>  static inline grant_ref_t dma_to_grant(dma_addr_t dma)
>  {
> -	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
> +	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
>  }
>  
>  static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
> @@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
>  				 unsigned long attrs)
>  {
>  	struct xen_grant_dma_data *data;
> -	unsigned int i, n_pages = PFN_UP(size);
> +	unsigned int i, n_pages = XEN_PFN_UP(size);
>  	unsigned long pfn;
>  	grant_ref_t grant;
>  	void *ret;
> @@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
>  	if (unlikely(data->broken))
>  		return NULL;
>  
> -	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
> +	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
>  	if (!ret)
>  		return NULL;
>  
>  	pfn = virt_to_pfn(ret);
>  
>  	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
> -		free_pages_exact(ret, n_pages * PAGE_SIZE);
> +		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
>  		return NULL;
>  	}
>  
> @@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
>  			       dma_addr_t dma_handle, unsigned long attrs)
>  {
>  	struct xen_grant_dma_data *data;
> -	unsigned int i, n_pages = PFN_UP(size);
> +	unsigned int i, n_pages = XEN_PFN_UP(size);
>  	grant_ref_t grant;
>  
>  	data = find_xen_grant_dma_data(dev);
> @@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
>  
>  	gnttab_free_grant_reference_seq(grant, n_pages);
>  
> -	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
> +	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
>  }
>  
>  static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
> @@ -168,9 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
>  					 unsigned long attrs)
>  {
>  	struct xen_grant_dma_data *data;
> -	unsigned long dma_offset = offset_in_page(offset),
> -			pfn_offset = PFN_DOWN(offset);
> -	unsigned int i, n_pages = PFN_UP(dma_offset + size);
> +	unsigned long dma_offset = xen_offset_in_page(offset),
> +			pfn_offset = XEN_PFN_DOWN(offset);
> +	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
>  	grant_ref_t grant;
>  	dma_addr_t dma_handle;
>  
> @@ -203,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
>  				     unsigned long attrs)
>  {
>  	struct xen_grant_dma_data *data;
> -	unsigned long offset = dma_handle & (PAGE_SIZE - 1);
> -	unsigned int i, n_pages = PFN_UP(offset + size);
> +	unsigned long dma_offset = xen_offset_in_page(dma_handle);
> +	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
>  	grant_ref_t grant;
>  
>  	if (WARN_ON(dir == DMA_NONE))
> -- 
> 2.25.1
>
Xenia Ragiadakou Oct. 14, 2022, 7:35 a.m. UTC | #2
On 10/8/22 18:10, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 
> Currently, a grant ref is always based on the Xen page granularity
> (4KB), and the guest commonly uses the same page granularity.
> But the guest may use a different page granularity (e.g. 64KB).
> So adapt the code to be able to deal with it.
> 
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

Reviewed-by: Xenia Ragiadakou <burzalodowa@gmail.com>

> ---
> Cc: Juergen Gross <jgross@suse.com>
> Cc: Xenia Ragiadakou <burzalodowa@gmail.com>
> 
> Changes V1 -> V2:
>     - update commit description
>     - rebase
>     - use xen_offset_in_page() in xen_grant_dma_map(unmap)_page()
> 
> Previous discussion is at:
> https://lore.kernel.org/xen-devel/20221006120912.1948459-1-olekstysh@gmail.com/
> ---
>   drivers/xen/grant-dma-ops.c | 24 ++++++++++++------------
>   1 file changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
> index 1d018e3a68a0..aff0f95ed954 100644
> --- a/drivers/xen/grant-dma-ops.c
> +++ b/drivers/xen/grant-dma-ops.c
> @@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
>   
>   static inline dma_addr_t grant_to_dma(grant_ref_t grant)
>   {
> -	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
> +	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
>   }
>   
>   static inline grant_ref_t dma_to_grant(dma_addr_t dma)
>   {
> -	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
> +	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
>   }
>   
>   static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
> @@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
>   				 unsigned long attrs)
>   {
>   	struct xen_grant_dma_data *data;
> -	unsigned int i, n_pages = PFN_UP(size);
> +	unsigned int i, n_pages = XEN_PFN_UP(size);
>   	unsigned long pfn;
>   	grant_ref_t grant;
>   	void *ret;
> @@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size,
>   	if (unlikely(data->broken))
>   		return NULL;
>   
> -	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
> +	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
>   	if (!ret)
>   		return NULL;
>   
>   	pfn = virt_to_pfn(ret);
>   
>   	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
> -		free_pages_exact(ret, n_pages * PAGE_SIZE);
> +		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
>   		return NULL;
>   	}
>   
> @@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
>   			       dma_addr_t dma_handle, unsigned long attrs)
>   {
>   	struct xen_grant_dma_data *data;
> -	unsigned int i, n_pages = PFN_UP(size);
> +	unsigned int i, n_pages = XEN_PFN_UP(size);
>   	grant_ref_t grant;
>   
>   	data = find_xen_grant_dma_data(dev);
> @@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
>   
>   	gnttab_free_grant_reference_seq(grant, n_pages);
>   
> -	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
> +	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
>   }
>   
>   static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
> @@ -168,9 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
>   					 unsigned long attrs)
>   {
>   	struct xen_grant_dma_data *data;
> -	unsigned long dma_offset = offset_in_page(offset),
> -			pfn_offset = PFN_DOWN(offset);
> -	unsigned int i, n_pages = PFN_UP(dma_offset + size);
> +	unsigned long dma_offset = xen_offset_in_page(offset),
> +			pfn_offset = XEN_PFN_DOWN(offset);
> +	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
>   	grant_ref_t grant;
>   	dma_addr_t dma_handle;
>   
> @@ -203,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
>   				     unsigned long attrs)
>   {
>   	struct xen_grant_dma_data *data;
> -	unsigned long offset = dma_handle & (PAGE_SIZE - 1);
> -	unsigned int i, n_pages = PFN_UP(offset + size);
> +	unsigned long dma_offset = xen_offset_in_page(dma_handle);
> +	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
>   	grant_ref_t grant;
>   
>   	if (WARN_ON(dir == DMA_NONE))

Patch

diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 1d018e3a68a0..aff0f95ed954 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -31,12 +31,12 @@  static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
 
 static inline dma_addr_t grant_to_dma(grant_ref_t grant)
 {
-	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
 }
 
 static inline grant_ref_t dma_to_grant(dma_addr_t dma)
 {
-	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
+	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
 }
 
 static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
@@ -79,7 +79,7 @@  static void *xen_grant_dma_alloc(struct device *dev, size_t size,
 				 unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(size);
+	unsigned int i, n_pages = XEN_PFN_UP(size);
 	unsigned long pfn;
 	grant_ref_t grant;
 	void *ret;
@@ -91,14 +91,14 @@  static void *xen_grant_dma_alloc(struct device *dev, size_t size,
 	if (unlikely(data->broken))
 		return NULL;
 
-	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
 	if (!ret)
 		return NULL;
 
 	pfn = virt_to_pfn(ret);
 
 	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
-		free_pages_exact(ret, n_pages * PAGE_SIZE);
+		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
 		return NULL;
 	}
 
@@ -116,7 +116,7 @@  static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
 			       dma_addr_t dma_handle, unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned int i, n_pages = PFN_UP(size);
+	unsigned int i, n_pages = XEN_PFN_UP(size);
 	grant_ref_t grant;
 
 	data = find_xen_grant_dma_data(dev);
@@ -138,7 +138,7 @@  static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
 
 	gnttab_free_grant_reference_seq(grant, n_pages);
 
-	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
 }
 
 static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
@@ -168,9 +168,9 @@  static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
 					 unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned long dma_offset = offset_in_page(offset),
-			pfn_offset = PFN_DOWN(offset);
-	unsigned int i, n_pages = PFN_UP(dma_offset + size);
+	unsigned long dma_offset = xen_offset_in_page(offset),
+			pfn_offset = XEN_PFN_DOWN(offset);
+	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
 	grant_ref_t grant;
 	dma_addr_t dma_handle;
 
@@ -203,8 +203,8 @@  static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
 	struct xen_grant_dma_data *data;
-	unsigned long offset = dma_handle & (PAGE_SIZE - 1);
-	unsigned int i, n_pages = PFN_UP(offset + size);
+	unsigned long dma_offset = xen_offset_in_page(dma_handle);
+	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
 	grant_ref_t grant;
 
 	if (WARN_ON(dir == DMA_NONE))
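
The practical effect on the map/unmap paths can be seen with a small standalone sketch (user-space illustration only; GUEST_PAGE_* and PFN_UP_GUEST are hypothetical stand-ins for the guest-side macros, assuming a 64KB guest page size):

  #include <stdio.h>

  /* Hypothetical stand-ins for the kernel macros, for illustration only. */
  #define GUEST_PAGE_SHIFT	16			/* 64KB guest pages */
  #define GUEST_PAGE_SIZE		(1UL << GUEST_PAGE_SHIFT)
  #define XEN_PAGE_SHIFT		12			/* 4KB Xen pages */
  #define XEN_PAGE_SIZE		(1UL << XEN_PAGE_SHIFT)

  #define PFN_UP_GUEST(x)		(((x) + GUEST_PAGE_SIZE - 1) >> GUEST_PAGE_SHIFT)
  #define XEN_PFN_UP(x)		(((x) + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT)

  int main(void)
  {
  	unsigned long size = 8192;	/* an 8KB streaming mapping */

  	/* One 64KB guest page, but the buffer spans two 4KB grant refs. */
  	printf("guest-page count: %lu\n", PFN_UP_GUEST(size));	/* prints 1 */
  	printf("xen-page count:   %lu\n", XEN_PFN_UP(size));	/* prints 2 */

  	return 0;
  }

Before the conversion, n_pages in xen_grant_dma_map_page() would have been computed in guest-page units on such a configuration, issuing fewer grant references than the 4KB-granular grant table actually requires to cover the buffer.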