sparse: use force attribute for vm_fault_t casts

Message ID cf47f8c3-c4f3-7f80-ce17-ed9fbc7fe424@openvz.org (mailing list archive)
State Superseded
Series sparse: use force attribute for vm_fault_t casts

Commit Message

Vasily Averin May 14, 2022, 2:26 p.m. UTC
Fixes sparse warnings:
./include/trace/events/fs_dax.h:10:1: sparse:
    got restricted vm_fault_t
./include/trace/events/fs_dax.h:153:1: sparse:
    got restricted vm_fault_t
fs/dax.c:563:39: sparse:    got restricted vm_fault_t
fs/dax.c:565:39: sparse:    got restricted vm_fault_t
fs/dax.c:569:31: sparse:    got restricted vm_fault_t
fs/dax.c:1055:41: sparse:
    got restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1461:46: sparse:    got restricted vm_fault_t [usertype] ret
fs/dax.c:1477:21: sparse:
    expected restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1518:51: sparse:
    got restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1599:21: sparse:
    expected restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1633:62: sparse:
    got restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1696:55: sparse:    got restricted vm_fault_t
fs/dax.c:1711:58: sparse:
    got restricted vm_fault_t [assigned] [usertype] ret

The vm_fault_t type is __bitwise, so sparse requires the __force attribute
on any cast to or from it.

Signed-off-by: Vasily Averin <vvs@openvz.org>
---
 fs/dax.c                 | 22 +++++++++++-----------
 include/linux/mm_types.h | 30 ++++++++++++++++--------------
 2 files changed, 27 insertions(+), 25 deletions(-)
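
[Not part of the patch: a minimal sketch of why sparse warns here. The
typedef and the flag definition mirror include/linux/mm_types.h; the two
demo functions are hypothetical.]

#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise vm_fault_t;

#define VM_FAULT_OOM	((__force vm_fault_t)0x000001)

unsigned long demo_bad(void)
{
	return VM_FAULT_OOM;	/* sparse: got restricted vm_fault_t */
}

unsigned long demo_ok(void)
{
	/* the __force cast tells sparse the conversion is intentional */
	return (__force unsigned long)VM_FAULT_OOM;
}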

Comments

Matthew Wilcox (Oracle) May 14, 2022, 5:34 p.m. UTC | #1
On Sat, May 14, 2022 at 05:26:21PM +0300, Vasily Averin wrote:
> Fixes sparse warnings:
> ./include/trace/events/fs_dax.h:10:1: sparse:
>     got restricted vm_fault_t
> ./include/trace/events/fs_dax.h:153:1: sparse:
>     got restricted vm_fault_t
> fs/dax.c:563:39: sparse:    got restricted vm_fault_t
> fs/dax.c:565:39: sparse:    got restricted vm_fault_t
> fs/dax.c:569:31: sparse:    got restricted vm_fault_t
> fs/dax.c:1055:41: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1461:46: sparse:    got restricted vm_fault_t [usertype] ret
> fs/dax.c:1477:21: sparse:
>     expected restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1518:51: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1599:21: sparse:
>     expected restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1633:62: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> fs/dax.c:1696:55: sparse:    got restricted vm_fault_t
> fs/dax.c:1711:58: sparse:
>     got restricted vm_fault_t [assigned] [usertype] ret
> 
> The vm_fault_t type is __bitwise, so sparse requires the __force attribute
> on any cast to or from it.

Well, this patch is all kinds of messy.  I would rather we had better
abstractions.  For example ...

> @@ -560,13 +560,13 @@ static void *grab_mapping_entry(struct xa_state *xas,
>  	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
>  		goto retry;
>  	if (xas->xa_node == XA_ERROR(-ENOMEM))
> -		return xa_mk_internal(VM_FAULT_OOM);
> +		return xa_mk_internal((__force unsigned long)VM_FAULT_OOM);
>  	if (xas_error(xas))
> -		return xa_mk_internal(VM_FAULT_SIGBUS);
> +		return xa_mk_internal((__force unsigned long)VM_FAULT_SIGBUS);
>  	return entry;
>  fallback:
>  	xas_unlock_irq(xas);
> -	return xa_mk_internal(VM_FAULT_FALLBACK);
> +	return xa_mk_internal((__force unsigned long)VM_FAULT_FALLBACK);
>  }

	return vm_fault_encode(VM_FAULT_xxx);

>  /**
> @@ -1052,7 +1052,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
>  			DAX_ZERO_PAGE, false);
>  
>  	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
> -	trace_dax_load_hole(inode, vmf, ret);
> +	trace_dax_load_hole(inode, vmf, (__force int)ret);

Seems like trace_dax_load_hole() should take a vm_fault_t?

> -	trace_dax_pte_fault(iter.inode, vmf, ret);
> +	trace_dax_pte_fault(iter.inode, vmf, (__force int)ret);

Ditto.
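
[Not part of the thread: a sketch of the tracepoint change being suggested.
The shape follows the generic DECLARE_EVENT_CLASS pattern, not the actual
dax_pte_fault_class definition in include/trace/events/fs_dax.h.]

/*
 * Give the event class a vm_fault_t parameter so the single __force
 * cast lives inside TP_fast_assign() instead of at every
 * trace_dax_*() call site.
 */
DECLARE_EVENT_CLASS(dax_pte_fault_class,
	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
		 vm_fault_t result),		/* was: int result */
	TP_ARGS(inode, vmf, result),
	TP_STRUCT__entry(
		__field(unsigned int, result)
	),
	TP_fast_assign(
		__entry->result = (__force unsigned int)result;
	),
	TP_printk("result %s", __print_flags(__entry->result, "|",
					     VM_FAULT_RESULT_TRACE))
);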

> @@ -1474,7 +1474,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
>  
>  	entry = grab_mapping_entry(&xas, mapping, 0);
>  	if (xa_is_internal(entry)) {
> -		ret = xa_to_internal(entry);
> +		ret = (__force vm_fault_t)xa_to_internal(entry);

vm_fault_decode(entry)?

... the others seem like more of the same.  So I'm in favour of what
you're doing, but would rather it were done differently.  Generally
seeing __force casts in the body of a function is a sign that things are
wrong; it's better to have them hidden in abstractions.
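
[Not part of the thread: a sketch of the helpers suggested above. The names
vm_fault_encode()/vm_fault_decode() are Willy's; the bodies are assumptions,
written against the xa_mk_internal()/xa_to_internal() calls quoted in the
patch.]

/*
 * Confine the __force casts to two small helpers so the call sites
 * in fs/dax.c stay cast-free.
 */
static inline void *vm_fault_encode(vm_fault_t ret)
{
	return xa_mk_internal((__force unsigned long)ret);
}

static inline vm_fault_t vm_fault_decode(void *entry)
{
	return (__force vm_fault_t)xa_to_internal(entry);
}

grab_mapping_entry() could then end with "return
vm_fault_encode(VM_FAULT_FALLBACK);" and the fault handlers with "ret =
vm_fault_decode(entry);", keeping every __force cast in one place.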

Patch

diff --git a/fs/dax.c b/fs/dax.c
index 67a08a32fccb..eb1a1808f719 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -560,13 +560,13 @@  static void *grab_mapping_entry(struct xa_state *xas,
 	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
 		goto retry;
 	if (xas->xa_node == XA_ERROR(-ENOMEM))
-		return xa_mk_internal(VM_FAULT_OOM);
+		return xa_mk_internal((__force unsigned long)VM_FAULT_OOM);
 	if (xas_error(xas))
-		return xa_mk_internal(VM_FAULT_SIGBUS);
+		return xa_mk_internal((__force unsigned long)VM_FAULT_SIGBUS);
 	return entry;
 fallback:
 	xas_unlock_irq(xas);
-	return xa_mk_internal(VM_FAULT_FALLBACK);
+	return xa_mk_internal((__force unsigned long)VM_FAULT_FALLBACK);
 }
 
 /**
@@ -1052,7 +1052,7 @@  static vm_fault_t dax_load_hole(struct xa_state *xas,
 			DAX_ZERO_PAGE, false);
 
 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
-	trace_dax_load_hole(inode, vmf, ret);
+	trace_dax_load_hole(inode, vmf, (__force int)ret);
 	return ret;
 }
 
@@ -1458,7 +1458,7 @@  static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	void *entry;
 	int error;
 
-	trace_dax_pte_fault(iter.inode, vmf, ret);
+	trace_dax_pte_fault(iter.inode, vmf, (__force int)ret);
 	/*
 	 * Check whether offset isn't beyond end of file now. Caller is supposed
 	 * to hold locks serializing us with truncate / punch hole so this is
@@ -1474,7 +1474,7 @@  static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 	entry = grab_mapping_entry(&xas, mapping, 0);
 	if (xa_is_internal(entry)) {
-		ret = xa_to_internal(entry);
+		ret = (__force vm_fault_t)xa_to_internal(entry);
 		goto out;
 	}
 
@@ -1515,7 +1515,7 @@  static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 unlock_entry:
 	dax_unlock_entry(&xas, entry);
 out:
-	trace_dax_pte_fault_done(iter.inode, vmf, ret);
+	trace_dax_pte_fault_done(iter.inode, vmf, (__force int)ret);
 	return ret;
 }
 
@@ -1596,7 +1596,7 @@  static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 */
 	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
 	if (xa_is_internal(entry)) {
-		ret = xa_to_internal(entry);
+		ret = (__force vm_fault_t)xa_to_internal(entry);
 		goto fallback;
 	}
 
@@ -1630,7 +1630,7 @@  static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		count_vm_event(THP_FAULT_FALLBACK);
 	}
 out:
-	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
+	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, (__force int)ret);
 	return ret;
 }
 #else
@@ -1693,7 +1693,7 @@  dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 		put_unlocked_entry(&xas, entry, WAKE_NEXT);
 		xas_unlock_irq(&xas);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
-						      VM_FAULT_NOPAGE);
+						      (__force int)VM_FAULT_NOPAGE);
 		return VM_FAULT_NOPAGE;
 	}
 	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
@@ -1708,7 +1708,7 @@  dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 	else
 		ret = VM_FAULT_FALLBACK;
 	dax_unlock_entry(&xas, entry);
-	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
+	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, (__force int)ret);
 	return ret;
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8834e38c06a4..57cc4918b1b1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -745,20 +745,22 @@  enum vm_fault_reason {
 			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
 			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
 
-#define VM_FAULT_RESULT_TRACE \
-	{ VM_FAULT_OOM,                 "OOM" },	\
-	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
-	{ VM_FAULT_MAJOR,               "MAJOR" },	\
-	{ VM_FAULT_WRITE,               "WRITE" },	\
-	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
-	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
-	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
-	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
-	{ VM_FAULT_LOCKED,              "LOCKED" },	\
-	{ VM_FAULT_RETRY,               "RETRY" },	\
-	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
-	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
-	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }
+#define faultflag_string(flag) {(__force unsigned long)VM_FAULT_##flag, #flag}
+
+#define VM_FAULT_RESULT_TRACE			\
+	faultflag_string(OOM),			\
+	faultflag_string(SIGBUS),		\
+	faultflag_string(MAJOR),		\
+	faultflag_string(WRITE),		\
+	faultflag_string(HWPOISON),		\
+	faultflag_string(HWPOISON_LARGE),	\
+	faultflag_string(SIGSEGV),		\
+	faultflag_string(NOPAGE),		\
+	faultflag_string(LOCKED),		\
+	faultflag_string(RETRY),		\
+	faultflag_string(FALLBACK),		\
+	faultflag_string(DONE_COW),		\
+	faultflag_string(NEEDDSYNC)
 
 struct vm_special_mapping {
 	const char *name;	/* The name, e.g. "[vdso]". */