@@ -11,6 +11,7 @@ config M68K
select ARCH_NO_PREEMPT if !COLDFIRE
select ARCH_USE_MEMTEST if MMU_MOTOROLA
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_HAS_VM_GET_PAGE_PROT
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
select GENERIC_ATOMIC64
@@ -86,65 +86,6 @@
| CF_PAGE_READABLE \
| CF_PAGE_DIRTY)
-/*
- * Page protections for initialising protection_map. See mm/mmap.c
- * for use. In general, the bit positions are xwr, and P-items are
- * private, the S-items are shared.
- */
-#define __P000 PAGE_NONE
-#define __P001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __P010 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE)
-#define __P011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE)
-#define __P100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __P101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __P110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-#define __P111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-
-#define __S000 PAGE_NONE
-#define __S001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __S010 PAGE_SHARED
-#define __S011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE)
-#define __S100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __S101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __S110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_EXEC)
-#define __S111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-
#define PTE_MASK PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
@@ -83,28 +83,6 @@ extern unsigned long mm_cachebits;
#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
-/*
- * The m68k can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
- */
-#define __P000 PAGE_NONE_C
-#define __P001 PAGE_READONLY_C
-#define __P010 PAGE_COPY_C
-#define __P011 PAGE_COPY_C
-#define __P100 PAGE_READONLY_C
-#define __P101 PAGE_READONLY_C
-#define __P110 PAGE_COPY_C
-#define __P111 PAGE_COPY_C
-
-#define __S000 PAGE_NONE_C
-#define __S001 PAGE_READONLY_C
-#define __S010 PAGE_SHARED_C
-#define __S011 PAGE_SHARED_C
-#define __S100 PAGE_READONLY_C
-#define __S101 PAGE_READONLY_C
-#define __S110 PAGE_SHARED_C
-#define __S111 PAGE_SHARED_C
-
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
/*
@@ -66,28 +66,6 @@
| SUN3_PAGE_SYSTEM \
| SUN3_PAGE_NOCACHE)
-/*
- * Page protections for initialising protection_map. The sun3 has only two
- * protection settings, valid (implying read and execute) and writeable. These
- * are as close as we can get...
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
/* Use these fake page-protections on PMDs. */
#define SUN3_PMD_VALID (0x00000001)
@@ -128,3 +128,91 @@ void __init mem_init(void)
memblock_free_all();
init_pointer_tables();
}
+
+#ifdef CONFIG_MMU_COLDFIRE
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE);
+ case VM_WRITE:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_WRITABLE);
+ case VM_READ | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE|CF_PAGE_WRITABLE);
+ case VM_EXEC:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_EXEC);
+ case VM_EXEC | VM_READ:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE|CF_PAGE_EXEC);
+ case VM_EXEC | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_WRITABLE|CF_PAGE_EXEC);
+ case VM_EXEC | VM_READ | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE|CF_PAGE_WRITABLE|
+ CF_PAGE_EXEC);
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE);
+ case VM_SHARED | VM_WRITE:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_READ | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE|CF_PAGE_SHARED);
+ case VM_SHARED | VM_EXEC:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_EXEC);
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE|CF_PAGE_EXEC);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_SHARED|CF_PAGE_EXEC);
+ case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID|CF_PAGE_ACCESSED|CF_PAGE_READABLE|CF_PAGE_SHARED|
+ CF_PAGE_EXEC);
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+#endif
+
+#ifdef CONFIG_SUN3
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READONLY;
+ case VM_WRITE:
+ return PAGE_COPY;
+ case VM_READ | VM_WRITE:
+ return PAGE_COPY;
+ case VM_EXEC:
+ return PAGE_READONLY;
+ case VM_EXEC | VM_READ:
+ return PAGE_READONLY;
+ case VM_EXEC | VM_WRITE:
+ return PAGE_COPY;
+ case VM_EXEC | VM_READ | VM_WRITE:
+ return PAGE_COPY;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_READ | VM_WRITE:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+ return PAGE_SHARED;
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+#endif
@@ -400,12 +400,9 @@ void __init paging_init(void)
/* Fix the cache mode in the page descriptors for the 680[46]0. */
if (CPU_IS_040_OR_060) {
- int i;
#ifndef mm_cachebits
mm_cachebits = _PAGE_CACHE040;
#endif
- for (i = 0; i < 16; i++)
- pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
}
min_addr = m68k_memory[0].addr;
@@ -483,3 +480,44 @@ void __init paging_init(void)
max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
free_area_init(max_zone_pfn);
}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return __pgprot(pgprot_val(PAGE_NONE_C)|mm_cachebits);
+ case VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C)|mm_cachebits);
+ case VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_COPY_C)|mm_cachebits);
+ case VM_READ | VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_COPY_C)|mm_cachebits);
+ case VM_EXEC:
+ return __pgprot(pgprot_val(PAGE_READONLY_C)|mm_cachebits);
+ case VM_EXEC | VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C)|mm_cachebits);
+ case VM_EXEC | VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_COPY_C)|mm_cachebits);
+ case VM_EXEC | VM_READ | VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_COPY_C)|mm_cachebits);
+ case VM_SHARED:
+ return __pgprot(pgprot_val(PAGE_NONE_C)|mm_cachebits);
+ case VM_SHARED | VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C)|mm_cachebits);
+ case VM_SHARED | VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_SHARED_C)|mm_cachebits);
+ case VM_SHARED | VM_READ | VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_SHARED_C)|mm_cachebits);
+ case VM_SHARED | VM_EXEC:
+ return __pgprot(pgprot_val(PAGE_READONLY_C)|mm_cachebits);
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C)|mm_cachebits);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_SHARED_C)|mm_cachebits);
+ case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+ return __pgprot(pgprot_val(PAGE_SHARED_C)|mm_cachebits);
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped, as they are no longer needed.

Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/m68k/Kconfig                        |  1 +
 arch/m68k/include/asm/mcf_pgtable.h      | 59 ----------------
 arch/m68k/include/asm/motorola_pgtable.h | 22 ------
 arch/m68k/include/asm/sun3_pgtable.h     | 22 ------
 arch/m68k/mm/init.c                      | 88 ++++++++++++++++++++++
 arch/m68k/mm/motorola.c                  | 44 +++++++++++-
 6 files changed, 130 insertions(+), 106 deletions(-)
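
For context: once ARCH_HAS_VM_GET_PAGE_PROT is selected, the generic
table-based helper in mm/mmap.c is compiled out and the per-platform
definitions added above become the only vm_get_page_prot() implementation.
A simplified sketch of that generic fallback, for illustration only (extra
arch hooks and details elided), looks roughly like:

	/* mm/mmap.c, simplified sketch -- not part of this patch */
	#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* index the 16-entry __PXXX/__SXXX based protection_map[] */
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);
	#endif

The switch statements added here enumerate the same sixteen vm_flags
combinations that the dropped __PXXX/__SXXX entries used to populate in
protection_map[].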