
[PATCHv3,6/6] arm64: Add support for CONFIG_DEBUG_VIRTUAL

Message ID 1479431816-5028-7-git-send-email-labbott@redhat.com
State New, archived

Commit Message

Laura Abbott Nov. 18, 2016, 1:16 a.m. UTC
x86 has an option CONFIG_DEBUG_VIRTUAL to do additional checks
on virt_to_phys calls. The goal is to catch users who are calling
virt_to_phys on non-linear addresses immediately. This includes callers
using virt_to_phys on image addresses instead of __pa_symbol. As features
such as CONFIG_VMAP_STACK get enabled for arm64, this becomes increasingly
important. Add checks to catch bad virt_to_phys usage.
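
For example (an illustrative sketch, not code from this series), the kind
of misuse these checks are meant to flag:

	/* some_flag lives in the kernel image, not in the linear map */
	static int some_flag;

	phys_addr_t bad  = virt_to_phys(&some_flag); /* WARNs with DEBUG_VIRTUAL */
	phys_addr_t good = __pa_symbol(&some_flag);  /* correct for image symbols */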

Signed-off-by: Laura Abbott <labbott@redhat.com>
---
v3: Make use of __pa_symbol required via debug checks. It's a WARN for now but
it can become a BUG after wider testing. __virt_to_phys is
now for linear addresses only. Dropped the VM_BUG_ON from the nodebug
version of Catalin's suggestion, since having it there essentially made
the nodebug version the debug version. Changed to KERNEL_START/KERNEL_END
for bounds checking. More comments.
---
 arch/arm64/Kconfig              |  1 +
 arch/arm64/include/asm/memory.h | 32 ++++++++++++++++++++++++++++----
 arch/arm64/mm/Makefile          |  1 +
 arch/arm64/mm/physaddr.c        | 39 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 69 insertions(+), 4 deletions(-)
 create mode 100644 arch/arm64/mm/physaddr.c

Comments

Mark Rutland Nov. 18, 2016, 5:53 p.m. UTC | #1
Hi,

On Thu, Nov 17, 2016 at 05:16:56PM -0800, Laura Abbott wrote:
> 
> x86 has an option CONFIG_DEBUG_VIRTUAL to do additional checks
> on virt_to_phys calls. The goal is to catch users who are calling
> virt_to_phys on non-linear addresses immediately. This includes callers
> using virt_to_phys on image addresses instead of __pa_symbol. As features
> such as CONFIG_VMAP_STACK get enabled for arm64, this becomes increasingly
> important. Add checks to catch bad virt_to_phys usage.
> 
> Signed-off-by: Laura Abbott <labbott@redhat.com>
> ---
> v3: Make use of __pa_symbol required via debug checks. It's a WARN for now but
> it can become a BUG after wider testing. __virt_to_phys is
> now for linear addresses only. Dropped the VM_BUG_ON from the nodebug
> version of Catalin's suggestion, since having it there essentially made
> the nodebug version the debug version. Changed to KERNEL_START/KERNEL_END
> for bounds checking. More comments.

I gave this a go with DEBUG_VIRTUAL && KASAN_INLINE selected, and the
kernel dies somewhere before bringing up earlycon. :(

I mentioned some possible reasons in a reply to patch 5, and I have
some more comments below.

[...]

> -#define __virt_to_phys(x) ({						\
> +
> +
> +/*
> + * This is for translation from the standard linear map to physical addresses.
> + * It is not to be used for kernel symbols.
> + */
> +#define __virt_to_phys_nodebug(x) ({					\
>  	phys_addr_t __x = (phys_addr_t)(x);				\
> -	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :	\
> -				 (__x - kimage_voffset); })
> +	((__x & ~PAGE_OFFSET) + PHYS_OFFSET);				\
> +})

Given the KASAN failure, and the strong possibility that there's even
more stuff lurking in common code, I think we should retain the logic to
handle kernel image addresses for the time being (as x86 does). Once
we've merged DEBUG_VIRTUAL, it will be easier to track those down.

Catalin, I think you suggested removing that logic; are you happy for it
to be restored?

See below for a refactoring that retains this logic.

[...]

> +/*
> + * This is for translation from a kernel image/symbol address to a
> + * physical address.
> + */
> +#define __pa_symbol_nodebug(x) ({					\
> +	phys_addr_t __x = (phys_addr_t)(x);				\
> +	(__x - kimage_voffset);						\
> +})

We can avoid duplication here (and in physaddr.c) if we factor the logic
into helpers, e.g.

/*
 * The linear kernel range starts in the middle of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check.
 */
#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))

#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(x);				\
	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
			       __kimg_to_phys(__x);			\
})

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))
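
To make the test concrete (assuming VA_BITS == 48, so PAGE_OFFSET is
0xffff800000000000; the example addresses are illustrative only):

	0xffff800000100000	/* bit 47 set:   linear map   -> __lm_to_phys()   */
	0xffff000008080000	/* bit 47 clear: kernel image -> __kimg_to_phys() */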

> +#ifdef CONFIG_DEBUG_VIRTUAL
> +extern unsigned long __virt_to_phys(unsigned long x);
> +extern unsigned long __phys_addr_symbol(unsigned long x);

It would be better for both of these to return phys_addr_t.
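
That is, something like:

	extern phys_addr_t __virt_to_phys(unsigned long x);
	extern phys_addr_t __phys_addr_symbol(unsigned long x);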

[...]

> diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
> new file mode 100644
> index 0000000..f8eb781
> --- /dev/null
> +++ b/arch/arm64/mm/physaddr.c
> @@ -0,0 +1,39 @@
> +#include <linux/mm.h>
> +
> +#include <asm/memory.h>

We also need:

#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>

> +unsigned long __virt_to_phys(unsigned long x)
> +{
> +	phys_addr_t __x = (phys_addr_t)x;
> +
> +	if (__x & BIT(VA_BITS - 1)) {
> +		/*
> +		 * The linear kernel range starts in the middle of the virtual
> +		 * adddress space. Testing the top bit for the start of the
> +		 * region is a sufficient check.
> +		 */
> +		return (__x & ~PAGE_OFFSET) + PHYS_OFFSET;
> +	} else {
> +		/*
> +		 * __virt_to_phys should not be used on symbol addresses.
> +		 * This should be changed to a BUG once all basic bad uses have
> +		 * been cleaned up.
> +		 */
> +		WARN(1, "Do not use virt_to_phys on symbol addresses");
> +		return __phys_addr_symbol(x);
> +	}
> +}
> +EXPORT_SYMBOL(__virt_to_phys);

I think this would be better as something like:

phys_addr_t __virt_to_phys(unsigned long x)
{
	WARN(!__is_lm_address(x),
	     "virt_to_phys() used for non-linear address: %pK\n",
	     (void*)x);
	
	return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);

> +
> +unsigned long __phys_addr_symbol(unsigned long x)
> +{
> +	phys_addr_t __x = (phys_addr_t)x;
> +
> +	/*
> +	 * This is bounds checking against the kernel image only.
> +	 * __pa_symbol should only be used on kernel symbol addresses.
> +	 */
> +	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START || x > (unsigned long) KERNEL_END);
> +	return (__x - kimage_voffset);
> +}
> +EXPORT_SYMBOL(__phys_addr_symbol);

Similarly:

phys_addr_t __phys_addr_symbol(unsigned long x)
{
	/*
	 * This is bounds checking against the kernel image only.
	 * __pa_symbol should only be used on kernel symbol addresses.
	 */
	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START ||
		       x > (unsigned long) KERNEL_END);

	return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);

Thanks,
Mark.
Mark Rutland Nov. 18, 2016, 6:25 p.m. UTC | #2
On Thu, Nov 17, 2016 at 05:16:56PM -0800, Laura Abbott wrote:
> diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
> index 54bb209..0d37c19 100644
> --- a/arch/arm64/mm/Makefile
> +++ b/arch/arm64/mm/Makefile
> @@ -5,6 +5,7 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
>  obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
>  obj-$(CONFIG_ARM64_PTDUMP)	+= dump.o
>  obj-$(CONFIG_NUMA)		+= numa.o
> +obj-$(CONFIG_DEBUG_VIRTUAL)	+= physaddr.o

We'll also need:

KASAN_SANITIZE_physaddr.o	:= n

... or code prior to KASAN init will cause the kernel to die if
__virt_to_phys() or __phys_addr_symbol() are called.
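
That is, the Makefile additions would end up looking like:

obj-$(CONFIG_DEBUG_VIRTUAL)	+= physaddr.o
KASAN_SANITIZE_physaddr.o	:= n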

>  obj-$(CONFIG_KASAN)		+= kasan_init.o
>  KASAN_SANITIZE_kasan_init.o	:= n

Thanks,
Mark.
Laura Abbott Nov. 18, 2016, 6:42 p.m. UTC | #3
On 11/18/2016 09:53 AM, Mark Rutland wrote:
> Hi,
> 
> On Thu, Nov 17, 2016 at 05:16:56PM -0800, Laura Abbott wrote:
>>
>> x86 has an option CONFIG_DEBUG_VIRTUAL to do additional checks
>> on virt_to_phys calls. The goal is to catch users who are calling
>> virt_to_phys on non-linear addresses immediately. This includes callers
>> using virt_to_phys on image addresses instead of __pa_symbol. As features
>> such as CONFIG_VMAP_STACK get enabled for arm64, this becomes increasingly
>> important. Add checks to catch bad virt_to_phys usage.
>>
>> Signed-off-by: Laura Abbott <labbott@redhat.com>
>> ---
>> v3: Make use of __pa_symbol required via debug checks. It's a WARN for now but
>> it can become a BUG after wider testing. __virt_to_phys is
>> now for linear addresses only. Dropped the VM_BUG_ON from the nodebug
>> version of Catalin's suggestion, since having it there essentially made
>> the nodebug version the debug version. Changed to KERNEL_START/KERNEL_END
>> for bounds checking. More comments.
> 
> I gave this a go with DEBUG_VIRTUAL && KASAN_INLINE selected, and the
> kernel dies somewhere before bringing up earlycon. :(
> 
> I mentioned some possible reasons in a reply to patch 5, and I have
> some more comments below.
> 
> [...]
> 
>> -#define __virt_to_phys(x) ({						\
>> +
>> +
>> +/*
>> + * This is for translation from the standard linear map to physical addresses.
>> + * It is not to be used for kernel symbols.
>> + */
>> +#define __virt_to_phys_nodebug(x) ({					\
>>  	phys_addr_t __x = (phys_addr_t)(x);				\
>> -	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :	\
>> -				 (__x - kimage_voffset); })
>> +	((__x & ~PAGE_OFFSET) + PHYS_OFFSET);				\
>> +})
> 
> Given the KASAN failure, and the strong possibility that there's even
> more stuff lurking in common code, I think we should retain the logic to
> handle kernel image addresses for the time being (as x86 does). Once
> we've merged DEBUG_VIRTUAL, it will be easier to track those down.
> 

Agreed. I might see about adding another option DEBUG_STRICT_VIRTUAL
for catching bad __pa vs __pa_symbol usage and keep DEBUG_VIRTUAL for
catching addresses that will work in neither case.
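
Roughly (a sketch only; CONFIG_DEBUG_STRICT_VIRTUAL is hypothetical here,
and this reuses the helpers you suggest below):

phys_addr_t __virt_to_phys(unsigned long x)
{
	if (__is_lm_address(x))
		return __lm_to_phys(x);

	/* Not a linear map address: it must at least be an image address. */
	VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START ||
		       x > (unsigned long)KERNEL_END);

	/* Translatable, but the caller should have used __pa_symbol. */
	WARN(IS_ENABLED(CONFIG_DEBUG_STRICT_VIRTUAL), /* hypothetical option */
	     "virt_to_phys() used for kernel image address: %pK\n",
	     (void *)x);

	return __kimg_to_phys(x);
}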

> Catalin, I think you suggested removing that logic; are you happy for it
> to be restored?
> 
> See below for a refactoring that retains this logic.
> 
> [...]
> 
>> +/*
>> + * This is for translation from a kernel image/symbol address to a
>> + * physical address.
>> + */
>> +#define __pa_symbol_nodebug(x) ({					\
>> +	phys_addr_t __x = (phys_addr_t)(x);				\
>> +	(__x - kimage_voffset);						\
>> +})
> 
> We can avoid duplication here (and in physaddr.c) if we factor the logic
> into helpers, e.g.
> 
> /*
>  * The linear kernel range starts in the middle of the virtual address
>  * space. Testing the top bit for the start of the region is a
>  * sufficient check.
>  */
> #define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))
> 
> #define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
> #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
> 
> #define __virt_to_phys_nodebug(x) ({					\
> 	phys_addr_t __x = (phys_addr_t)(x);				\
> 	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
> 			       __kimg_to_phys(__x);			\
> })
> 
> #define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))
> 

Yes, this is much cleaner.

>> +#ifdef CONFIG_DEBUG_VIRTUAL
>> +extern unsigned long __virt_to_phys(unsigned long x);
>> +extern unsigned long __phys_addr_symbol(unsigned long x);
> 
> It would be better for both of these to return phys_addr_t.
> 
> [...]
> 

I was worried this would turn into another warning project but
at first pass this works fine and the type will hopefully catch
some bad uses elsewhere.

>> diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
>> new file mode 100644
>> index 0000000..f8eb781
>> --- /dev/null
>> +++ b/arch/arm64/mm/physaddr.c
>> @@ -0,0 +1,39 @@
>> +#include <linux/mm.h>
>> +
>> +#include <asm/memory.h>
> 
> We also need:
> 
> #include <linux/bug.h>
> #include <linux/export.h>
> #include <linux/types.h>
> #include <linux/mmdebug.h>
> 
>> +unsigned long __virt_to_phys(unsigned long x)
>> +{
>> +	phys_addr_t __x = (phys_addr_t)x;
>> +
>> +	if (__x & BIT(VA_BITS - 1)) {
>> +		/*
>> +		 * address space. Testing the top bit for the start of the
>> +		 * adddress space. Testing the top bit for the start of the
>> +		 * region is a sufficient check.
>> +		 */
>> +		return (__x & ~PAGE_OFFSET) + PHYS_OFFSET;
>> +	} else {
>> +		/*
>> +		 * __virt_to_phys should not be used on symbol addresses.
>> +		 * This should be changed to a BUG once all basic bad uses have
>> +		 * been cleaned up.
>> +		 */
>> +		WARN(1, "Do not use virt_to_phys on symbol addresses");
>> +		return __phys_addr_symbol(x);
>> +	}
>> +}
>> +EXPORT_SYMBOL(__virt_to_phys);
> 
> I think this would be better as something like:
> 
> phys_addr_t __virt_to_phys(unsigned long x)
> {
> 	WARN(!__is_lm_address(x),
> 	     "virt_to_phys() used for non-linear address: %pK\n",
> 	     (void*)x);
> 	
> 	return __virt_to_phys_nodebug(x);
> }
> EXPORT_SYMBOL(__virt_to_phys);
> 
>> +
>> +unsigned long __phys_addr_symbol(unsigned long x)
>> +{
>> +	phys_addr_t __x = (phys_addr_t)x;
>> +
>> +	/*
>> +	 * This is bounds checking against the kernel image only.
>> +	 * __pa_symbol should only be used on kernel symbol addresses.
>> +	 */
>> +	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START || x > (unsigned long) KERNEL_END);
>> +	return (__x - kimage_voffset);
>> +}
>> +EXPORT_SYMBOL(__phys_addr_symbol);
> 
> Similarly:
> 
> phys_addr_t __phys_addr_symbol(unsigned long x)
> {
> 	/*
> 	 * This is bounds checking against the kernel image only.
> 	 * __pa_symbol should only be used on kernel symbol addresses.
> 	 */
> 	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START ||
> 		       x > (unsigned long) KERNEL_END);
> 
> 	return __pa_symbol_nodebug(x);
> }
> EXPORT_SYMBOL(__phys_addr_symbol);
> 
> Thanks,
> Mark.
> 

Thanks,
Laura
Mark Rutland Nov. 18, 2016, 7:05 p.m. UTC | #4
On Fri, Nov 18, 2016 at 10:42:56AM -0800, Laura Abbott wrote:
> On 11/18/2016 09:53 AM, Mark Rutland wrote:
> > On Thu, Nov 17, 2016 at 05:16:56PM -0800, Laura Abbott wrote:

> >> +#define __virt_to_phys_nodebug(x) ({					\
> >>  	phys_addr_t __x = (phys_addr_t)(x);				\
> >> -	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :	\
> >> -				 (__x - kimage_voffset); })
> >> +	((__x & ~PAGE_OFFSET) + PHYS_OFFSET);				\
> >> +})
> > 
> > Given the KASAN failure, and the strong possibility that there's even
> > more stuff lurking in common code, I think we should retain the logic to
> > handle kernel image addresses for the time being (as x86 does). Once
> > we've merged DEBUG_VIRTUAL, it will be easier to track those down.
> 
> Agreed. I might see about adding another option DEBUG_STRICT_VIRTUAL
> for catching bad __pa vs __pa_symbol usage and keep DEBUG_VIRTUAL for
> catching addresses that will work in neither case.

I think it makes sense for DEBUG_VIRTUAL to do both, so long as the
default behaviour (and fallback after a WARN for virt_to_phys()) matches
what we currently do. We'll get useful diagnostics, but a graceful
fallback.

I think the helpers I suggested below do that?  Or have I misunderstood,
and you mean something stricter (e.g. checking whether an lm address
is backed by something)?

> > phys_addr_t __virt_to_phys(unsigned long x)
> > {
> > 	WARN(!__is_lm_address(x),
> > 	     "virt_to_phys() used for non-linear address: %pK\n",
> > 	     (void*)x);
> > 	
> > 	return __virt_to_phys_nodebug(x);
> > }
> > EXPORT_SYMBOL(__virt_to_phys);

> > phys_addr_t __phys_addr_symbol(unsigned long x)
> > {
> > 	/*
> > 	 * This is bounds checking against the kernel image only.
> > 	 * __pa_symbol should only be used on kernel symbol addresses.
> > 	 */
> > 	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START ||
> > 		       x > (unsigned long) KERNEL_END);
> > 
> > 	return __pa_symbol_nodebug(x);
> > }
> > EXPORT_SYMBOL(__phys_addr_symbol);

Thanks,
Mark.
Laura Abbott Nov. 18, 2016, 7:17 p.m. UTC | #5
On 11/18/2016 11:05 AM, Mark Rutland wrote:
> On Fri, Nov 18, 2016 at 10:42:56AM -0800, Laura Abbott wrote:
>> On 11/18/2016 09:53 AM, Mark Rutland wrote:
>>> On Thu, Nov 17, 2016 at 05:16:56PM -0800, Laura Abbott wrote:
> 
>>>> +#define __virt_to_phys_nodebug(x) ({					\
>>>>  	phys_addr_t __x = (phys_addr_t)(x);				\
>>>> -	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :	\
>>>> -				 (__x - kimage_voffset); })
>>>> +	((__x & ~PAGE_OFFSET) + PHYS_OFFSET);				\
>>>> +})
>>>
>>> Given the KASAN failure, and the strong possibility that there's even
>>> more stuff lurking in common code, I think we should retain the logic to
>>> handle kernel image addresses for the time being (as x86 does). Once
>>> we've merged DEBUG_VIRTUAL, it will be easier to track those down.
>>
>> Agreed. I might see about adding another option DEBUG_STRICT_VIRTUAL
>> for catching bad __pa vs __pa_symbol usage and keep DEBUG_VIRTUAL for
>> catching addresses that will work in neither case.
> 
> I think it makes sense for DEBUG_VIRTUAL to do both, so long as the
> default behaviour (and fallback after a WARN for virt_to_phys()) matches
> what we currently do. We'll get useful diagnostics, but a graceful
> fallback.
> 

I was suggesting making the WARN optional, so this is more useful
before all the __pa_symbol stuff gets cleaned up. Maybe the WARN won't
actually be a hindrance.

Thanks,
Laura

> I think the helpers I suggested below do that?  Or have I misunderstood,
> and you mean something stricter (e.g. checking whether an lm address
> is backed by something)?
> 
>>> phys_addr_t __virt_to_phys(unsigned long x)
>>> {
>>> 	WARN(!__is_lm_address(x),
>>> 	     "virt_to_phys() used for non-linear address: %pK\n",
>>> 	     (void*)x);
>>> 	
>>> 	return __virt_to_phys_nodebug(x);
>>> }
>>> EXPORT_SYMBOL(__virt_to_phys);
> 
>>> phys_addr_t __phys_addr_symbol(unsigned long x)
>>> {
>>> 	/*
>>> 	 * This is bounds checking against the kernel image only.
>>> 	 * __pa_symbol should only be used on kernel symbol addresses.
>>> 	 */
>>> 	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START ||
>>> 		       x > (unsigned long) KERNEL_END);
>>>
>>> 	return __pa_symbol_nodebug(x);
>>> }
>>> EXPORT_SYMBOL(__phys_addr_symbol);
> 
> Thanks,
> Mark.
>

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 969ef88..83b95bc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -6,6 +6,7 @@  config ARM64
 	select ACPI_MCFG if ACPI
 	select ACPI_SPCR_TABLE if ACPI
 	select ARCH_CLOCKSOURCE_DATA
+	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_ELF_RANDOMIZE
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 1e65299..2ed712e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -167,10 +167,33 @@  extern u64			kimage_voffset;
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
-#define __virt_to_phys(x) ({						\
+
+
+/*
+ * This is for translation from the standard linear map to physical addresses.
+ * It is not to be used for kernel symbols.
+ */
+#define __virt_to_phys_nodebug(x) ({					\
 	phys_addr_t __x = (phys_addr_t)(x);				\
-	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :	\
-				 (__x - kimage_voffset); })
+	((__x & ~PAGE_OFFSET) + PHYS_OFFSET);				\
+})
+
+/*
+ * This is for translation from a kernel image/symbol address to a
+ * physical address.
+ */
+#define __pa_symbol_nodebug(x) ({					\
+	phys_addr_t __x = (phys_addr_t)(x);				\
+	(__x - kimage_voffset);						\
+})
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __virt_to_phys(unsigned long x);
+extern unsigned long __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
+#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
+#endif
 
 #define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
 #define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
@@ -202,7 +225,8 @@  static inline void *phys_to_virt(phys_addr_t x)
  * Drivers should NOT use these either.
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
-#define __pa_symbol(x)		__pa(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 54bb209..0d37c19 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -5,6 +5,7 @@  obj-y				:= dma-mapping.o extable.o fault.o init.o \
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_ARM64_PTDUMP)	+= dump.o
 obj-$(CONFIG_NUMA)		+= numa.o
+obj-$(CONFIG_DEBUG_VIRTUAL)	+= physaddr.o
 
 obj-$(CONFIG_KASAN)		+= kasan_init.o
 KASAN_SANITIZE_kasan_init.o	:= n
diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
new file mode 100644
index 0000000..f8eb781
--- /dev/null
+++ b/arch/arm64/mm/physaddr.c
@@ -0,0 +1,39 @@ 
+#include <linux/mm.h>
+
+#include <asm/memory.h>
+
+unsigned long __virt_to_phys(unsigned long x)
+{
+	phys_addr_t __x = (phys_addr_t)x;
+
+	if (__x & BIT(VA_BITS - 1)) {
+		/*
+		 * The linear kernel range starts in the middle of the virtual
+		 * address space. Testing the top bit for the start of the
+		 * region is a sufficient check.
+		 */
+		return (__x & ~PAGE_OFFSET) + PHYS_OFFSET;
+	} else {
+		/*
+		 * __virt_to_phys should not be used on symbol addresses.
+		 * This should be changed to a BUG once all basic bad uses have
+		 * been cleaned up.
+		 */
+		WARN(1, "Do not use virt_to_phys on symbol addresses");
+		return __phys_addr_symbol(x);
+	}
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+unsigned long __phys_addr_symbol(unsigned long x)
+{
+	phys_addr_t __x = (phys_addr_t)x;
+
+	/*
+	 * This is bounds checking against the kernel image only.
+	 * __pa_symbol should only be used on kernel symbol addresses.
+	 */
+	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START || x > (unsigned long) KERNEL_END);
+	return (__x - kimage_voffset);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);