
define vmemmap before pfn_to_page calls

Message ID 20191217131530.513096-1-david.abdurachmanov@sifive.com (mailing list archive)
State New, archived
Series: define vmemmap before pfn_to_page calls

Commit Message

David Abdurachmanov Dec. 17, 2019, 1:15 p.m. UTC
The pfn_to_page call depends on `vmemmap` being defined before the call.
This caused compilation errors in Fedora/RISCV with 5.5-rc2; the breakage
came from the NOMMU changes, which moved these declarations below the
function definitions that use them.

Signed-off-by: David Abdurachmanov <david.abdurachmanov@sifive.com>
Fixes: 6bd33e1ece52 ("riscv: add nommu support")
---
 arch/riscv/include/asm/pgtable.h | 34 ++++++++++++++++----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
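
For context, the ordering constraint can be reproduced outside the kernel.
The sketch below is hypothetical (my_pfn_to_page and the address constant
are made up for illustration, not taken from the tree): an identifier used
inside a static inline body is resolved when that body is parsed, so the
#define has to come first. Moving the #define below the function reproduces
the "'vmemmap' undeclared" build error seen with 5.5-rc2.

struct page { unsigned long flags; };

#define VMEMMAP_START	0xffffffe000000000UL	/* made-up address */
#define vmemmap	((struct page *)VMEMMAP_START)	/* must precede any user */

/* Hypothetical stand-in for the inline helpers in pgtable.h. */
static inline struct page *my_pfn_to_page(unsigned long pfn)
{
	return vmemmap + pfn;	/* expands to the pointer defined above */
}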

Comments

Anup Patel Dec. 18, 2019, 3:46 a.m. UTC | #1
On Tue, Dec 17, 2019 at 6:45 PM David Abdurachmanov
<david.abdurachmanov@gmail.com> wrote:
>
> pfn_to_page call depends on `vmemmap` being available before the call.
> This caused compilation errors in Fedora/RISCV with 5.5-rc2 and was caused
> by NOMMU changes which moved declarations after functions definitions.
>
> Signed-off-by: David Abdurachmanov <david.abdurachmanov@sifive.com>
> Fixes: 6bd33e1ece52 ("riscv: add nommu support")
> ---
>  arch/riscv/include/asm/pgtable.h | 34 ++++++++++++++++----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 7ff0ed4f292e..d8c89e6e6b3d 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -90,6 +90,23 @@ extern pgd_t swapper_pg_dir[];
>  #define __S110 PAGE_SHARED_EXEC
>  #define __S111 PAGE_SHARED_EXEC
>
> +#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
> +#define VMALLOC_END      (PAGE_OFFSET - 1)
> +#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
> +
> +/*
> + * Roughly size the vmemmap space to be large enough to fit enough
> + * struct pages to map half the virtual address space. Then
> + * position vmemmap directly below the VMALLOC region.
> + */
> +#define VMEMMAP_SHIFT \
> +       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
> +#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
> +#define VMEMMAP_END    (VMALLOC_START - 1)
> +#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
> +
> +#define vmemmap                ((struct page *)VMEMMAP_START)
> +
>  static inline int pmd_present(pmd_t pmd)
>  {
>         return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
> @@ -400,23 +417,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
>  #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
>  #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
>
> -#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
> -#define VMALLOC_END      (PAGE_OFFSET - 1)
> -#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
> -
> -/*
> - * Roughly size the vmemmap space to be large enough to fit enough
> - * struct pages to map half the virtual address space. Then
> - * position vmemmap directly below the VMALLOC region.
> - */
> -#define VMEMMAP_SHIFT \
> -       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
> -#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
> -#define VMEMMAP_END    (VMALLOC_START - 1)
> -#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
> -
> -#define vmemmap                ((struct page *)VMEMMAP_START)
> -
>  #define PCI_IO_SIZE      SZ_16M
>  #define PCI_IO_END       VMEMMAP_START
>  #define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
> --
> 2.23.0
>

Can you add a comment for "#define vmemmap" about your findings?

Otherwise looks good to me.

Reviewed-by: Anup Patel <anup@brainfault.org>

Regards,
Anup
David Abdurachmanov Dec. 18, 2019, 5:08 a.m. UTC | #2
On Wed, Dec 18, 2019 at 5:46 AM Anup Patel <anup@brainfault.org> wrote:
>
> On Tue, Dec 17, 2019 at 6:45 PM David Abdurachmanov
> <david.abdurachmanov@gmail.com> wrote:
> >
> > pfn_to_page call depends on `vmemmap` being available before the call.
> > This caused compilation errors in Fedora/RISCV with 5.5-rc2 and was caused
> > by NOMMU changes which moved declarations after functions definitions.
> >
> > Signed-off-by: David Abdurachmanov <david.abdurachmanov@sifive.com>
> > Fixes: 6bd33e1ece52 ("riscv: add nommu support")
> > ---
> >  arch/riscv/include/asm/pgtable.h | 34 ++++++++++++++++----------------
> >  1 file changed, 17 insertions(+), 17 deletions(-)
> >
> > diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> > index 7ff0ed4f292e..d8c89e6e6b3d 100644
> > --- a/arch/riscv/include/asm/pgtable.h
> > +++ b/arch/riscv/include/asm/pgtable.h
> > @@ -90,6 +90,23 @@ extern pgd_t swapper_pg_dir[];
> >  #define __S110 PAGE_SHARED_EXEC
> >  #define __S111 PAGE_SHARED_EXEC
> >
> > +#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
> > +#define VMALLOC_END      (PAGE_OFFSET - 1)
> > +#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
> > +
> > +/*
> > + * Roughly size the vmemmap space to be large enough to fit enough
> > + * struct pages to map half the virtual address space. Then
> > + * position vmemmap directly below the VMALLOC region.
> > + */
> > +#define VMEMMAP_SHIFT \
> > +       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
> > +#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
> > +#define VMEMMAP_END    (VMALLOC_START - 1)
> > +#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
> > +
> > +#define vmemmap                ((struct page *)VMEMMAP_START)
> > +
> >  static inline int pmd_present(pmd_t pmd)
> >  {
> >         return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
> > @@ -400,23 +417,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
> >  #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
> >  #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
> >
> > -#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
> > -#define VMALLOC_END      (PAGE_OFFSET - 1)
> > -#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
> > -
> > -/*
> > - * Roughly size the vmemmap space to be large enough to fit enough
> > - * struct pages to map half the virtual address space. Then
> > - * position vmemmap directly below the VMALLOC region.
> > - */
> > -#define VMEMMAP_SHIFT \
> > -       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
> > -#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
> > -#define VMEMMAP_END    (VMALLOC_START - 1)
> > -#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
> > -
> > -#define vmemmap                ((struct page *)VMEMMAP_START)
> > -
> >  #define PCI_IO_SIZE      SZ_16M
> >  #define PCI_IO_END       VMEMMAP_START
> >  #define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
> > --
> > 2.23.0
> >
>
> Can you add a comment for "#define vmemmap" about your findings ?

I will send v2 in a few hours with an extra comment. I will mention that
this is needed if CONFIG_SPARSEMEM_VMEMMAP=y.
See https://github.com/torvalds/linux/blob/master/include/asm-generic/memory_model.h
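
For reference, when CONFIG_SPARSEMEM_VMEMMAP=y that header provides roughly
the following (paraphrased from memory, check the tree for the exact text):
the memmap is virtually contiguous, so the pfn/page conversion is plain
pointer arithmetic against vmemmap, which is why the identifier must already
be visible wherever pfn_to_page is expanded.

#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)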

>
> Otherwise looks good to me.
>
> Reviewed-by: Anup Patel <anup@brainfault.org>
>
> Regards,
> Anup
Andreas Schwab Dec. 18, 2019, 8:30 a.m. UTC | #3
On Dec 17 2019, David Abdurachmanov wrote:

> pfn_to_page call depends on `vmemmap` being available before the call.

Only if CONFIG_SPARSEMEM_VMEMMAP is set, it seems.

Andreas.

Patch

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7ff0ed4f292e..d8c89e6e6b3d 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -90,6 +90,23 @@  extern pgd_t swapper_pg_dir[];
 #define __S110	PAGE_SHARED_EXEC
 #define __S111	PAGE_SHARED_EXEC
 
+#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END      (PAGE_OFFSET - 1)
+#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END	(VMALLOC_START - 1)
+#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
+
+#define vmemmap		((struct page *)VMEMMAP_START)
+
 static inline int pmd_present(pmd_t pmd)
 {
 	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -400,23 +417,6 @@  static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END      (PAGE_OFFSET - 1)
-#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
-	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END	(VMALLOC_START - 1)
-#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
-
-#define vmemmap		((struct page *)VMEMMAP_START)
-
 #define PCI_IO_SIZE      SZ_16M
 #define PCI_IO_END       VMEMMAP_START
 #define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
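
As a sanity check of the sizing comment above, here is a small standalone
sketch of the arithmetic. The configuration values are assumptions for an
Sv39 build (CONFIG_VA_BITS = 39, PAGE_SHIFT = 12, STRUCT_PAGE_MAX_SHIFT = 6,
i.e. sizeof(struct page) <= 64 bytes), not values read out of this patch:

#include <assert.h>

#define VA_BITS               39	/* assumed Sv39 */
#define PAGE_SHIFT            12	/* 4 KiB pages */
#define STRUCT_PAGE_MAX_SHIFT  6	/* struct page <= 64 bytes */

#define VMEMMAP_SHIFT (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)

int main(void)
{
	/* Half the virtual address space: 2^38 bytes -> 2^26 page frames. */
	unsigned long long pages = 1ULL << (VA_BITS - 1 - PAGE_SHIFT);
	/* One struct page (at most 64 bytes) per page frame. */
	unsigned long long bytes = pages << STRUCT_PAGE_MAX_SHIFT;

	assert(VMEMMAP_SHIFT == 32);		/* 4 GiB of vmemmap */
	assert(bytes == 1ULL << VMEMMAP_SHIFT);
	return 0;
}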