@@ -27,7 +27,7 @@
#ifdef CONFIG_MMU
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
unsigned long addr = (unsigned long)unsafe_src;
@@ -3,7 +3,7 @@
#include <linux/uaccess.h>
#include <linux/kernel.h>
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
/* highest bit set means kernel space */
return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
@@ -3,7 +3,7 @@
#include <linux/uaccess.h>
#include <linux/kernel.h>
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
/* highest bit set means kernel space */
return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
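For the two hunks above, the check relies on the kernel/user split at the top of the address space: shifting the address right by BITS_PER_LONG - 1 leaves only its most significant bit, which is set for kernel addresses and clear for user addresses. A throwaway userspace sketch of that arithmetic on a 64-bit build, not part of the patch:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	unsigned long kern = 0xffff800000000000UL;	/* kernel-half address */
	unsigned long user = 0x00007fffffff0000UL;	/* user-half address   */

	/* Non-zero exactly when the top bit is set: prints "1 0". */
	printf("%lu %lu\n", kern >> (BITS_PER_LONG - 1),
			    user >> (BITS_PER_LONG - 1));
	return 0;
}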
@@ -48,7 +48,7 @@ void * memcpy(void * dst,const void *src, size_t count)
EXPORT_SYMBOL(memcpy);
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
if ((unsigned long)unsafe_src < PAGE_SIZE)
return false;
@@ -7,7 +7,7 @@
#include <asm/inst.h>
#include <asm/ppc-opcode.h>
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
return is_kernel_addr((unsigned long)unsafe_src);
}
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <os.h>
-bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
+bool copy_kernel_nofault_allowed(const void *src, size_t size)
{
void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
@@ -6,7 +6,7 @@
#include <asm/vsyscall.h>
#ifdef CONFIG_X86_64
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
unsigned long vaddr = (unsigned long)unsafe_src;
@@ -36,7 +36,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
}
#else
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
return (unsigned long)unsafe_src >= TASK_SIZE_MAX;
}
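On the __is_canonical_address() side used in the x86_64 branch above, the test is essentially "sign-extending the address from its highest implemented bit gives back the same value". A rough standalone sketch of that idea, not the kernel's own helper, and leaning on the arithmetic right shift of signed values that the kernel also assumes:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: canonical means all bits above (vaddr_bits - 1) are
 * copies of bit (vaddr_bits - 1). */
bool canonical_sketch(uint64_t vaddr, unsigned int vaddr_bits)
{
	/* Move the top implemented bit up to bit 63, then let the
	 * arithmetic right shift smear it back over the upper bits. */
	int64_t sext = (int64_t)(vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);

	return (uint64_t)sext == vaddr;
}

With vaddr_bits == 48, 0xffff800000000000 and 0x00007fffffffffff pass, while 0x0000800000000000 does not.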
@@ -387,7 +387,7 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
return 0;
}
-bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
+bool copy_kernel_nofault_allowed(const void *unsafe_src, size_t size);
long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
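Since only the helper is renamed, the entry points declared above keep their names and semantics, so existing callers are untouched: copy_from_kernel_nofault() still consults the (now renamed) copy_kernel_nofault_allowed() hook first and bails out with -ERANGE before ever disabling page faults. A hedged sketch of a typical caller, where peek_kernel_value() is a made-up helper rather than anything in this patch:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: read an unsigned long from a possibly invalid
 * kernel address without risking a fault-induced oops. */
static int peek_kernel_value(const void *addr, unsigned long *out)
{
	unsigned long val;
	long ret;

	/*
	 * Negative errno on failure: -ERANGE when the address is rejected
	 * by copy_kernel_nofault_allowed(), or a fault error if the copy
	 * itself trips over an unmapped page.
	 */
	ret = copy_from_kernel_nofault(&val, addr, sizeof(val));
	if (ret)
		return ret;

	*out = val;
	return 0;
}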
@@ -7,8 +7,7 @@
#include <linux/uaccess.h>
#include <asm/tlb.h>
-bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
- size_t size)
+bool __weak copy_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
return true;
}
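The joined-up declaration above is the generic fallback in mm/maccess.c: being __weak, it is discarded at link time whenever an architecture supplies its own strong definition, which is exactly what the per-arch hunks earlier in the patch provide. A compilable toy version of the same linker behaviour, with a made-up symbol name and the GCC/Clang weak attribute spelled out:

#include <stdbool.h>
#include <stdio.h>

/* Weak default: used only if no other object file defines a strong
 * range_check_allowed() (hypothetical name, not a kernel symbol). */
__attribute__((weak)) bool range_check_allowed(const void *p, unsigned long size)
{
	(void)p;
	(void)size;
	return true;
}

int main(void)
{
	int x;

	/* Resolves to the weak default here; linking in another object
	 * file with a strong definition would silently replace it. */
	printf("%d\n", range_check_allowed(&x, sizeof(x)));
	return 0;
}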
@@ -28,7 +27,7 @@ long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
align = (unsigned long)dst | (unsigned long)src;
- if (!copy_from_kernel_nofault_allowed(src, size))
+ if (!copy_kernel_nofault_allowed(src, size))
return -ERANGE;
pagefault_disable();
@@ -83,7 +82,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
if (unlikely(count <= 0))
return 0;
- if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
+ if (!copy_kernel_nofault_allowed(unsafe_addr, count))
return -ERANGE;
pagefault_disable();