
[PATCHv4 05/10] arm64: Use __pa_symbol for kernel symbols

Message ID 1480445729-27130-6-git-send-email-labbott@redhat.com (mailing list archive)
State New, archived

Commit Message

Laura Abbott Nov. 29, 2016, 6:55 p.m. UTC
__pa_symbol is technically the macro that should be used for kernel
symbols. Switch to this as a pre-requisite for DEBUG_VIRTUAL which
will do bounds checking. As part of this, introduce lm_alias, a
macro which wraps the __va(__pa(...)) idiom used in a few places to
get the alias.
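
For example, the open-coded alias idiom converts roughly as follows
(illustrative sketch only; 'some_symbol' is a placeholder rather than a
symbol touched by this patch):

	/* before: open-coded linear map alias of a kernel image symbol */
	void *alias = __va(__pa(some_symbol));

	/* after: the same alias via the new helper */
	void *alias = lm_alias(some_symbol);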

Signed-off-by: Laura Abbott <labbott@redhat.com>
---
v4: Stop calling __va early, conversion of a few more sites. I decided against
wrapping the __p*d_populate calls into new functions since the call sites
should be limited.
---
 arch/arm64/include/asm/kvm_mmu.h          |  4 ++--
 arch/arm64/include/asm/memory.h           |  2 ++
 arch/arm64/include/asm/mmu_context.h      |  6 +++---
 arch/arm64/include/asm/pgtable.h          |  2 +-
 arch/arm64/kernel/acpi_parking_protocol.c |  2 +-
 arch/arm64/kernel/cpu-reset.h             |  2 +-
 arch/arm64/kernel/cpufeature.c            |  2 +-
 arch/arm64/kernel/hibernate.c             | 13 +++++--------
 arch/arm64/kernel/insn.c                  |  2 +-
 arch/arm64/kernel/psci.c                  |  2 +-
 arch/arm64/kernel/setup.c                 |  8 ++++----
 arch/arm64/kernel/smp_spin_table.c        |  2 +-
 arch/arm64/kernel/vdso.c                  |  4 ++--
 arch/arm64/mm/init.c                      | 11 ++++++-----
 arch/arm64/mm/kasan_init.c                | 21 +++++++++++++-------
 arch/arm64/mm/mmu.c                       | 32 +++++++++++++++++++------------
 drivers/firmware/psci.c                   |  2 +-
 include/linux/mm.h                        |  4 ++++
 18 files changed, 70 insertions(+), 51 deletions(-)

Comments

Catalin Marinas Nov. 30, 2016, 5:17 p.m. UTC | #1
On Tue, Nov 29, 2016 at 10:55:24AM -0800, Laura Abbott wrote:
> --- a/arch/arm64/include/asm/memory.h
> +++ b/arch/arm64/include/asm/memory.h
> @@ -205,6 +205,8 @@ static inline void *phys_to_virt(phys_addr_t x)
>  #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
>  #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
>  #define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
> +#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))
> +#define lm_alias(x)		__va(__pa_symbol(x))
[...]
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
>  #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
>  #endif
>  
> +#ifndef lm_alias
> +#define lm_alias(x)	__va(__pa_symbol(x))
> +#endif

You can drop the arm64-specific lm_alias macro as it's the same as the
generic one you introduced in the same patch.
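
With that change the memory.h hunk would only need to add sym_to_pfn,
something like (sketch, relying on the generic lm_alias definition that
this patch adds to <linux/mm.h>):

	#define sym_to_pfn(x)	__phys_to_pfn(__pa_symbol(x))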
Florian Fainelli Dec. 6, 2016, 12:50 a.m. UTC | #2
On 11/29/2016 10:55 AM, Laura Abbott wrote:
> __pa_symbol is technically the macro that should be used for kernel
> symbols. Switch to this as a pre-requisite for DEBUG_VIRTUAL which
> will do bounds checking. As part of this, introduce lm_alias, a
> macro which wraps the __va(__pa(...)) idiom used in a few places to
> get the alias.
> 
> Signed-off-by: Laura Abbott <labbott@redhat.com>
> ---
> v4: Stop calling __va early, conversion of a few more sites. I decided against
> wrapping the __p*d_populate calls into new functions since the call sites
> should be limited.
> ---


> -	pud_populate(&init_mm, pud, bm_pmd);
> +	if (pud_none(*pud))
> +		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
>  	pmd = fixmap_pmd(addr);
> -	pmd_populate_kernel(&init_mm, pmd, bm_pte);
> +	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

Is there a particular reason why pmd_populate_kernel() is not changed to
use __pa_symbol() instead of using __pa()? The other user in the arm64
kernel is arch/arm64/kernel/hibernate.c, which seems to call this against
kernel symbols as well?
Catalin Marinas Dec. 6, 2016, 11:46 a.m. UTC | #3
On Mon, Dec 05, 2016 at 04:50:33PM -0800, Florian Fainelli wrote:
> On 11/29/2016 10:55 AM, Laura Abbott wrote:
> > __pa_symbol is technically the macro that should be used for kernel
> > symbols. Switch to this as a pre-requisite for DEBUG_VIRTUAL which
> > will do bounds checking. As part of this, introduce lm_alias, a
> > macro which wraps the __va(__pa(...)) idiom used in a few places to
> > get the alias.
> > 
> > Signed-off-by: Laura Abbott <labbott@redhat.com>
> > ---
> > v4: Stop calling __va early, conversion of a few more sites. I decided against
> > wrapping the __p*d_populate calls into new functions since the call sites
> > should be limited.
> > ---
> 
> 
> > -	pud_populate(&init_mm, pud, bm_pmd);
> > +	if (pud_none(*pud))
> > +		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
> >  	pmd = fixmap_pmd(addr);
> > -	pmd_populate_kernel(&init_mm, pmd, bm_pte);
> > +	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
> 
> Is there a particular reason why pmd_populate_kernel() is not changed to
> use __pa_symbol() instead of using __pa()? The other user in the arm64
> kernel is arch/arm64/kernel/hibernate.c, which seems to call this against
> kernel symbols as well?

create_safe_exec_page() may allocate a pte from the linear map and
passes such pointer to pmd_populate_kernel(). The copy_pte() function
does something similar. In addition, we have the generic
__pte_alloc_kernel() in mm/memory.c using linear addresses.
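
So pmd_populate_kernel() must keep taking linear map pointers; only the
call sites that pass kernel image symbols can switch. Roughly (sketch,
not taken from the patch):

	/* pte allocated from the linear map: __pa()/virt_to_phys() is right */
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	pmd_populate_kernel(&init_mm, pmd, pte);

	/* pte that is a kernel image symbol: needs __pa_symbol() */
	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);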
Mark Rutland Dec. 6, 2016, 5:02 p.m. UTC | #4
Hi,

As a heads-up, it looks like this got mangled somewhere. In the hunk at
arch/arm64/mm/kasan_init.c:68, 'do' in the context became 'edo'.
Deleting the 'e' makes it apply.

I think this is almost there; other than James's hibernate bug I only
see one real issue, and everything else is a minor nit.

On Tue, Nov 29, 2016 at 10:55:24AM -0800, Laura Abbott wrote:
> __pa_symbol is technically the macro that should be used for kernel
> symbols. Switch to this as a pre-requisite for DEBUG_VIRTUAL which
> will do bounds checking. As part of this, introduce lm_alias, a
> macro which wraps the __va(__pa(...)) idiom used in a few places to
> get the alias.

I think the addition of the lm_alias() macro under include/mm should be
a separate preparatory patch. That way it's separate from the
arm64-specific parts, and more obvious to !arm64 people reviewing the
other parts.

> Signed-off-by: Laura Abbott <labbott@redhat.com>
> ---
> v4: Stop calling __va early, conversion of a few more sites. I decided against
> wrapping the __p*d_populate calls into new functions since the call sites
> should be limited.
> ---
>  arch/arm64/include/asm/kvm_mmu.h          |  4 ++--
>  arch/arm64/include/asm/memory.h           |  2 ++
>  arch/arm64/include/asm/mmu_context.h      |  6 +++---
>  arch/arm64/include/asm/pgtable.h          |  2 +-
>  arch/arm64/kernel/acpi_parking_protocol.c |  2 +-
>  arch/arm64/kernel/cpu-reset.h             |  2 +-
>  arch/arm64/kernel/cpufeature.c            |  2 +-
>  arch/arm64/kernel/hibernate.c             | 13 +++++--------
>  arch/arm64/kernel/insn.c                  |  2 +-
>  arch/arm64/kernel/psci.c                  |  2 +-
>  arch/arm64/kernel/setup.c                 |  8 ++++----
>  arch/arm64/kernel/smp_spin_table.c        |  2 +-
>  arch/arm64/kernel/vdso.c                  |  4 ++--
>  arch/arm64/mm/init.c                      | 11 ++++++-----
>  arch/arm64/mm/kasan_init.c                | 21 +++++++++++++-------
>  arch/arm64/mm/mmu.c                       | 32 +++++++++++++++++++------------
>  drivers/firmware/psci.c                   |  2 +-
>  include/linux/mm.h                        |  4 ++++
>  18 files changed, 70 insertions(+), 51 deletions(-)

It looks like we need to make sure these (directly) include <linux/mm.h>
for __pa_symbol() and lm_alias(), or there's some fragility, e.g.

[mark@leverpostej:~/src/linux]% uselinaro 15.08 make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j10 -s
arch/arm64/kernel/psci.c: In function 'cpu_psci_cpu_boot':
arch/arm64/kernel/psci.c:48:50: error: implicit declaration of function '__pa_symbol' [-Werror=implicit-function-declaration]
  int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
                                                  ^
cc1: some warnings being treated as errors
make[1]: *** [arch/arm64/kernel/psci.o] Error 1
make: *** [arch/arm64/kernel] Error 2
make: *** Waiting for unfinished jobs....
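
Presumably adding a direct include in each affected file is enough,
e.g. (illustrative placement only):

	#include <linux/mm.h>	/* for __pa_symbol() and lm_alias() */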

> --- a/arch/arm64/include/asm/memory.h
> +++ b/arch/arm64/include/asm/memory.h
> @@ -205,6 +205,8 @@ static inline void *phys_to_virt(phys_addr_t x)
>  #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
>  #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
>  #define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
> +#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))
> +#define lm_alias(x)		__va(__pa_symbol(x))

As Catalin mentioned, we should be able to drop this copy of lm_alias(),
given we have the same in the core headers.

> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
> index a2c2478..79cd86b 100644
> --- a/arch/arm64/kernel/vdso.c
> +++ b/arch/arm64/kernel/vdso.c
> @@ -140,11 +140,11 @@ static int __init vdso_init(void)
>  		return -ENOMEM;
>  
>  	/* Grab the vDSO data page. */
> -	vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
> +	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
>  
>  	/* Grab the vDSO code pages. */
>  	for (i = 0; i < vdso_pages; i++)
> -		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
> +		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa_symbol(&vdso_start)) + i);

I see you added sym_to_pfn(), which we can use here to keep this short
and legible. It might also be worth using a temporary pfn_t, e.g.

	pfn = sym_to_pfn(&vdso_start);

	for (i = 0; i < vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

> diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
> index 8263429..9defbe2 100644
> --- a/drivers/firmware/psci.c
> +++ b/drivers/firmware/psci.c
> @@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index)
>  	u32 *state = __this_cpu_read(psci_power_state);
>  
>  	return psci_ops.cpu_suspend(state[index - 1],
> -				    virt_to_phys(cpu_resume));
> +				    __pa_symbol(cpu_resume));
>  }
>  
>  int psci_cpu_suspend_enter(unsigned long index)

This should probably be its own patch since it's not under arch/arm64/.

I'm happy for this to go via the arm64 tree with the rest regardless
(assuming Lorenzo has no objections).

Thanks,
Mark.
Laura Abbott Dec. 6, 2016, 7:12 p.m. UTC | #5
On 12/06/2016 09:02 AM, Mark Rutland wrote:
> Hi,
> 
> As a heads-up, it looks like this got mangled somewhere. In the hunk at
> arch/arm64/mm/kasan_init.c:68, 'do' in the context became 'edo'.
> Deleting the 'e' makes it apply.
> 

Argh, this must have come in while editing the .patch before e-mailing.
Sorry about that.

> I think this is almost there; other than James's hibernate bug I only
> see one real issue, and everything else is a minor nit.
> 
> On Tue, Nov 29, 2016 at 10:55:24AM -0800, Laura Abbott wrote:
>> __pa_symbol is technically the macro that should be used for kernel
>> symbols. Switch to this as a pre-requisite for DEBUG_VIRTUAL which
>> will do bounds checking. As part of this, introduce lm_alias, a
>> macro which wraps the __va(__pa(...)) idiom used in a few places to
>> get the alias.
> 
> I think the addition of the lm_alias() macro under include/mm should be
> a separate preparatory patch. That way it's separate from the
> arm64-specific parts, and more obvious to !arm64 people reviewing the
> other parts.
> 

I debated whether it was more obvious to show how it was used in context
vs. a standalone patch. I think you're right that for non-arm64 reviewers
the separate patch would be easier to find.

>> Signed-off-by: Laura Abbott <labbott@redhat.com>
>> ---
>> v4: Stop calling __va early, conversion of a few more sites. I decided against
>> wrapping the __p*d_populate calls into new functions since the call sites
>> should be limited.
>> ---
>>  arch/arm64/include/asm/kvm_mmu.h          |  4 ++--
>>  arch/arm64/include/asm/memory.h           |  2 ++
>>  arch/arm64/include/asm/mmu_context.h      |  6 +++---
>>  arch/arm64/include/asm/pgtable.h          |  2 +-
>>  arch/arm64/kernel/acpi_parking_protocol.c |  2 +-
>>  arch/arm64/kernel/cpu-reset.h             |  2 +-
>>  arch/arm64/kernel/cpufeature.c            |  2 +-
>>  arch/arm64/kernel/hibernate.c             | 13 +++++--------
>>  arch/arm64/kernel/insn.c                  |  2 +-
>>  arch/arm64/kernel/psci.c                  |  2 +-
>>  arch/arm64/kernel/setup.c                 |  8 ++++----
>>  arch/arm64/kernel/smp_spin_table.c        |  2 +-
>>  arch/arm64/kernel/vdso.c                  |  4 ++--
>>  arch/arm64/mm/init.c                      | 11 ++++++-----
>>  arch/arm64/mm/kasan_init.c                | 21 +++++++++++++-------
>>  arch/arm64/mm/mmu.c                       | 32 +++++++++++++++++++------------
>>  drivers/firmware/psci.c                   |  2 +-
>>  include/linux/mm.h                        |  4 ++++
>>  18 files changed, 70 insertions(+), 51 deletions(-)
> 
> It looks like we need to make sure these (directly) include <linux/mm.h>
> for __pa_symbol() and lm_alias(), or there's some fragility, e.g.
> 
> [mark@leverpostej:~/src/linux]% uselinaro 15.08 make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j10 -s
> arch/arm64/kernel/psci.c: In function 'cpu_psci_cpu_boot':
> arch/arm64/kernel/psci.c:48:50: error: implicit declaration of function '__pa_symbol' [-Werror=implicit-function-declaration]
>   int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
>                                                   ^
> cc1: some warnings being treated as errors
> make[1]: *** [arch/arm64/kernel/psci.o] Error 1
> make: *** [arch/arm64/kernel] Error 2
> make: *** Waiting for unfinished jobs....
> 

Right, I'll double check. 

>> --- a/arch/arm64/include/asm/memory.h
>> +++ b/arch/arm64/include/asm/memory.h
>> @@ -205,6 +205,8 @@ static inline void *phys_to_virt(phys_addr_t x)
>>  #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
>>  #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
>>  #define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
>> +#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))
>> +#define lm_alias(x)		__va(__pa_symbol(x))
> 
> As Catalin mentioned, we should be able to drop this copy of lm_alias(),
> given we have the same in the core headers.
> 
>> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
>> index a2c2478..79cd86b 100644
>> --- a/arch/arm64/kernel/vdso.c
>> +++ b/arch/arm64/kernel/vdso.c
>> @@ -140,11 +140,11 @@ static int __init vdso_init(void)
>>  		return -ENOMEM;
>>  
>>  	/* Grab the vDSO data page. */
>> -	vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
>> +	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
>>  
>>  	/* Grab the vDSO code pages. */
>>  	for (i = 0; i < vdso_pages; i++)
>> -		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
>> +		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa_symbol(&vdso_start)) + i);
> 
> I see you added sym_to_pfn(), which we can use here to keep this short
> and legible. It might also be worth using a temporary pfn_t, e.g.
> 
> 	pfn = sym_to_pfn(&vdso_start);
> 
> 	for (i = 0; i < vdso_pages; i++)
> 		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
> 

Good idea.

>> diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
>> index 8263429..9defbe2 100644
>> --- a/drivers/firmware/psci.c
>> +++ b/drivers/firmware/psci.c
>> @@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index)
>>  	u32 *state = __this_cpu_read(psci_power_state);
>>  
>>  	return psci_ops.cpu_suspend(state[index - 1],
>> -				    virt_to_phys(cpu_resume));
>> +				    __pa_symbol(cpu_resume));
>>  }
>>  
>>  int psci_cpu_suspend_enter(unsigned long index)
> 
> This should probably be its own patch since it's not under arch/arm64/.
> 

Fine by me.

> I'm happy for this to go via the arm64 tree with the rest regardless
> (assuming Lorenzo has no objections).
> 
> Thanks,
> Mark.
> 

Thanks,
Laura

Patch

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6f72fe8..55772c1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -47,7 +47,7 @@ 
  * If the page is in the bottom half, we have to use the top half. If
  * the page is in the top half, we have to use the bottom half:
  *
- * T = __virt_to_phys(__hyp_idmap_text_start)
+ * T = __pa_symbol(__hyp_idmap_text_start)
  * if (T & BIT(VA_BITS - 1))
  *	HYP_VA_MIN = 0  //idmap in upper half
  * else
@@ -271,7 +271,7 @@  static inline void __kvm_flush_dcache_pud(pud_t pud)
 	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
 }
 
-#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
+#define kvm_virt_to_phys(x)		__pa_symbol(x)
 
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d773e2c..a219d3f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -205,6 +205,8 @@  static inline void *phys_to_virt(phys_addr_t x)
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))
+#define lm_alias(x)		__va(__pa_symbol(x))
 
 /*
  *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a501853..ea0f969 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -44,7 +44,7 @@  static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = virt_to_phys(empty_zero_page);
+	unsigned long ttbr = __pa_symbol(empty_zero_page);
 
 	write_sysreg(ttbr, ttbr0_el1);
 	isb();
@@ -113,7 +113,7 @@  static inline void cpu_install_idmap(void)
 	local_flush_tlb_all();
 	cpu_set_idmap_tcr_t0sz();
 
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
+	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }
 
 /*
@@ -128,7 +128,7 @@  static inline void cpu_replace_ttbr1(pgd_t *pgd)
 
 	phys_addr_t pgd_phys = virt_to_phys(pgd);
 
-	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
 	cpu_install_idmap();
 	replace_phys(pgd_phys);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffbb9a5..090134c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -52,7 +52,7 @@  extern void __pgd_error(const char *file, int line, unsigned long val);
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))
+#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
 
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index a32b401..df58310 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -109,7 +109,7 @@  static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
 	 * that read this address need to convert this address to the
 	 * Boot-Loader's endianness before jumping.
 	 */
-	writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+	writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
 	writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
 
 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index d4e9ecb..6c2b1b4 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -24,7 +24,7 @@  static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
 
 	el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
 		is_hyp_mode_available();
-	restart = (void *)virt_to_phys(__cpu_soft_restart);
+	restart = (void *)__pa_symbol(__cpu_soft_restart);
 
 	cpu_install_idmap();
 	restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c02504e..6ccadf2 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -736,7 +736,7 @@  static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused
 static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
 			   int __unused)
 {
-	phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
+	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 
 	/*
 	 * Activate the lower HYP offset only if:
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index d55a7b0..4f0c77d 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -50,9 +50,6 @@ 
  */
 extern int in_suspend;
 
-/* Find a symbols alias in the linear map */
-#define LMADDR(x)	phys_to_virt(virt_to_phys(x))
-
 /* Do we need to reset el2? */
 #define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
 
@@ -102,8 +99,8 @@  static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
 
 int pfn_is_nosave(unsigned long pfn)
 {
-	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
-	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
+	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
 
 	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
 }
@@ -125,12 +122,12 @@  int arch_hibernation_header_save(void *addr, unsigned int max_size)
 		return -EOVERFLOW;
 
 	arch_hdr_invariants(&hdr->invariants);
-	hdr->ttbr1_el1		= virt_to_phys(swapper_pg_dir);
+	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
 	hdr->reenter_kernel	= _cpu_resume;
 
 	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
 	if (el2_reset_needed())
-		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
 	else
 		hdr->__hyp_stub_vectors = 0;
 
@@ -484,7 +481,7 @@  int swsusp_arch_resume(void)
 	 * Since we only copied the linear map, we need to find restore_pblist's
 	 * linear map address.
 	 */
-	lm_restore_pblist = LMADDR(restore_pblist);
+	lm_restore_pblist = lm_alias(restore_pblist);
 
 	/*
 	 * We need a zero page that is zero before & after resume in order to
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 6f2ac4f..f607b38 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -97,7 +97,7 @@  static void __kprobes *patch_map(void *addr, int fixmap)
 	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
 		page = vmalloc_to_page(addr);
 	else if (!module)
-		page = pfn_to_page(PHYS_PFN(__pa(addr)));
+		page = phys_to_page(__pa_symbol(addr));
 	else
 		return addr;
 
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 42816be..f0f2abb 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -45,7 +45,7 @@  static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
 	if (err)
 		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f534f49..e2dbc02 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -199,10 +199,10 @@  static void __init request_standard_resources(void)
 	struct memblock_region *region;
 	struct resource *res;
 
-	kernel_code.start   = virt_to_phys(_text);
-	kernel_code.end     = virt_to_phys(__init_begin - 1);
-	kernel_data.start   = virt_to_phys(_sdata);
-	kernel_data.end     = virt_to_phys(_end - 1);
+	kernel_code.start   = __pa_symbol(_text);
+	kernel_code.end     = __pa_symbol(__init_begin - 1);
+	kernel_data.start   = __pa_symbol(_sdata);
+	kernel_data.end     = __pa_symbol(_end - 1);
 
 	for_each_memblock(memory, region) {
 		res = alloc_bootmem_low(sizeof(*res));
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 9a00eee..25fccca 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -98,7 +98,7 @@  static int smp_spin_table_cpu_prepare(unsigned int cpu)
 	 * boot-loader's endianess before jumping. This is mandated by
 	 * the boot protocol.
 	 */
-	writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
 	__flush_dcache_area((__force void *)release_addr,
 			    sizeof(*release_addr));
 
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a2c2478..79cd86b 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -140,11 +140,11 @@  static int __init vdso_init(void)
 		return -ENOMEM;
 
 	/* Grab the vDSO data page. */
-	vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
+	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
 
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
+		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa_symbol(&vdso_start)) + i);
 
 	vdso_spec[0].pages = &vdso_pagelist[0];
 	vdso_spec[1].pages = &vdso_pagelist[1];
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 212c4d1..95ef998 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -209,8 +209,8 @@  void __init arm64_memblock_init(void)
 	 * linear mapping. Take care not to clip the kernel which may be
 	 * high in memory.
 	 */
-	memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
-			ULLONG_MAX);
+	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
+			__pa_symbol(_end)), ULLONG_MAX);
 	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
 		/* ensure that memstart_addr remains sufficiently aligned */
 		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
@@ -225,7 +225,7 @@  void __init arm64_memblock_init(void)
 	 */
 	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
 		memblock_mem_limit_remove_map(memory_limit);
-		memblock_add(__pa(_text), (u64)(_end - _text));
+		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
 	}
 
 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
@@ -278,7 +278,7 @@  void __init arm64_memblock_init(void)
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
 	 */
-	memblock_reserve(__pa(_text), _end - _text);
+	memblock_reserve(__pa_symbol(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start) {
 		memblock_reserve(initrd_start, initrd_end - initrd_start);
@@ -483,7 +483,8 @@  void __init mem_init(void)
 
 void free_initmem(void)
 {
-	free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
+	free_reserved_area(lm_alias(__init_begin),
+			   lm_alias(__init_end),
 			   0, "unused kernel");
 	/*
 	 * Unmap the __init region but leave the VM area in place. This
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 757009d..0fb8110 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -26,6 +26,13 @@ 
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). All the early functions are called too
+ * early to use lm_alias so __p*d_populate functions must be used to populate
+ * with the physical address from __pa_symbol.
+ */
+
 static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
 					unsigned long end)
 {
@@ -33,12 +40,12 @@  static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
 	unsigned long next;
 
 	if (pmd_none(*pmd))
-		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+		__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
 
 	pte = pte_offset_kimg(pmd, addr);
 	do {
 		next = addr + PAGE_SIZE;
-		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
+		set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
 					PAGE_KERNEL));
 	} while (pte++, addr = next, addr != end && pte_none(*pte));
 }
@@ -51,7 +58,7 @@  static void __init kasan_early_pmd_populate(pud_t *pud,
 	unsigned long next;
 
 	if (pud_none(*pud))
-		pud_populate(&init_mm, pud, kasan_zero_pmd);
+		__pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
 
 	pmd = pmd_offset_kimg(pud, addr);
 	do {
@@ -68,7 +75,7 @@  static void __init kasan_early_pud_populate(pgd_t *pgd,
 	unsigned long next;
 
 	if (pgd_none(*pgd))
-		pgd_populate(&init_mm, pgd, kasan_zero_pud);
+		__pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
 
 	pud = pud_offset_kimg(pgd, addr);
 	edo {
@@ -148,7 +155,7 @@  void __init kasan_init(void)
 	 */
 	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
 	dsb(ishst);
-	cpu_replace_ttbr1(tmp_pg_dir);
+	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -199,10 +206,10 @@  void __init kasan_init(void)
 	 */
 	for (i = 0; i < PTRS_PER_PTE; i++)
 		set_pte(&kasan_zero_pte[i],
-			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
 
 	memset(kasan_zero_page, 0, PAGE_SIZE);
-	cpu_replace_ttbr1(swapper_pg_dir);
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
 	/* At this point kasan is fully initialized. Enable error messages */
 	init_task.kasan_depth = 0;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05615a3..7498ebd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -319,8 +319,8 @@  static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
-	unsigned long kernel_start = __pa(_text);
-	unsigned long kernel_end = __pa(__init_begin);
+	unsigned long kernel_start = __pa_symbol(_text);
+	unsigned long kernel_end = __pa_symbol(__init_begin);
 
 	/*
 	 * Take care not to create a writable alias for the
@@ -387,21 +387,21 @@  void mark_rodata_ro(void)
 	unsigned long section_size;
 
 	section_size = (unsigned long)_etext - (unsigned long)_text;
-	create_mapping_late(__pa(_text), (unsigned long)_text,
+	create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
 			    section_size, PAGE_KERNEL_ROX);
 	/*
 	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
 	 * to cover NOTES and EXCEPTION_TABLE.
 	 */
 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
-	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
+	create_mapping_late(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
 			    section_size, PAGE_KERNEL_RO);
 }
 
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 				      pgprot_t prot, struct vm_struct *vma)
 {
-	phys_addr_t pa_start = __pa(va_start);
+	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;
 
 	BUG_ON(!PAGE_ALIGNED(pa_start));
@@ -449,7 +449,7 @@  static void __init map_kernel(pgd_t *pgd)
 		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
 		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
-			__pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
 		pud_clear_fixmap();
 	} else {
 		BUG();
@@ -480,7 +480,7 @@  void __init paging_init(void)
 	 */
 	cpu_replace_ttbr1(__va(pgd_phys));
 	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
-	cpu_replace_ttbr1(swapper_pg_dir);
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
 	pgd_clear_fixmap();
 	memblock_free(pgd_phys, PAGE_SIZE);
@@ -489,7 +489,7 @@  void __init paging_init(void)
 	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
 	 * allocated with it.
 	 */
-	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
 		      SWAPPER_DIR_SIZE - PAGE_SIZE);
 }
 
@@ -600,6 +600,12 @@  static inline pte_t * fixmap_pte(unsigned long addr)
 	return &bm_pte[pte_index(addr)];
 }
 
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
 void __init early_fixmap_init(void)
 {
 	pgd_t *pgd;
@@ -609,7 +615,7 @@  void __init early_fixmap_init(void)
 
 	pgd = pgd_offset_k(addr);
 	if (CONFIG_PGTABLE_LEVELS > 3 &&
-	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
+	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
 		/*
 		 * We only end up here if the kernel mapping and the fixmap
 		 * share the top level pgd entry, which should only happen on
@@ -618,12 +624,14 @@  void __init early_fixmap_init(void)
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
 		pud = pud_offset_kimg(pgd, addr);
 	} else {
-		pgd_populate(&init_mm, pgd, bm_pud);
+		if (pgd_none(*pgd))
+			__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
 		pud = fixmap_pud(addr);
 	}
-	pud_populate(&init_mm, pud, bm_pmd);
+	if (pud_none(*pud))
+		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
 	pmd = fixmap_pmd(addr);
-	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
 
 	/*
 	 * The boot-ioremap range spans multiple pmds, for which
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8263429..9defbe2 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -383,7 +383,7 @@  static int psci_suspend_finisher(unsigned long index)
 	u32 *state = __this_cpu_read(psci_power_state);
 
 	return psci_ops.cpu_suspend(state[index - 1],
-				    virt_to_phys(cpu_resume));
+				    __pa_symbol(cpu_resume));
 }
 
 int psci_cpu_suspend_enter(unsigned long index)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a92c8d7..88556b8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@  extern int mmap_rnd_compat_bits __read_mostly;
 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
 #endif
 
+#ifndef lm_alias
+#define lm_alias(x)	__va(__pa_symbol(x))
+#endif
+
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.