From patchwork Fri Feb 20 10:31:24 2009
X-Patchwork-Submitter: Yoshihiro Shimoda
X-Patchwork-Id: 8097
X-Patchwork-Delegate: lethal@linux-sh.org
Date: Fri, 20 Feb 2009 19:31:24 +0900
From: Yoshihiro Shimoda
Subject: [PATCH 1/3] sh: Add support for 32-bit physical addressing by another method
To: Paul Mundt
Cc: linux-sh@vger.kernel.org
Message-id: <499E867C.6080202@renesas.com>
X-Mailing-List: linux-sh@vger.kernel.org

With this method the kernel does not manage the PMB; the boot loader is
expected to set it up beforehand. The P1/P2 area is handled like 29-bit
physical addressing, and local bus devices are assigned to the P3 area.
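For reviewers who want to sanity-check the address model, here is a minimal
userspace sketch (not part of the patch) of the translation the fixed-PMB
layout implies. The PAGE_OFFSET/MEMORY_START values are hypothetical
examples, and to_phys()/to_virt() simply mirror the __pa()/__va()
definitions added to page.h further down.

#include <stdio.h>

/* Hypothetical example values; the real ones come from the board Kconfig. */
#define PAGE_OFFSET	0x80000000UL	/* CONFIG_PAGE_OFFSET */
#define MEMORY_START	0x48000000UL	/* CONFIG_MEMORY_START, 32-bit bus address */

#define PXSEG(a)	((unsigned long)(a) & 0xe0000000UL)	/* segment bits, as in addrspace.h */
#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(MEMORY_START))	/* as in the page.h hunk below */

/* Stand-ins for the kernel's __pa()/__va() under CONFIG_FIXED_PMB. */
static unsigned long to_phys(unsigned long virt) { return virt - PMB_OFFSET; }
static unsigned long to_virt(unsigned long phys) { return phys + PMB_OFFSET; }

int main(void)
{
	/* The kernel is linked at PAGE_OFFSET + (MEMORY_START & 0x1fffffff). */
	unsigned long link = PAGE_OFFSET + (MEMORY_START & 0x1fffffffUL);

	printf("link address       : 0x%08lx\n", link);			/* 0x88000000 */
	printf("to_phys(link)      : 0x%08lx\n", to_phys(link));	/* 0x48000000 */
	printf("to_virt(MEM_START) : 0x%08lx\n", to_virt(MEMORY_START));/* 0x88000000 */
	return 0;
}

The point is that only the low 29 bits of the physical base show up in the
link address; the upper segment bits are supplied by the PMB mapping that the
boot loader programs.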
Signed-off-by: Yoshihiro Shimoda
---
 arch/sh/boot/Makefile           |   15 +++++++++++++
 arch/sh/include/asm/addrspace.h |    4 +-
 arch/sh/include/asm/io.h        |    4 +-
 arch/sh/include/asm/page.h      |    7 +++++-
 arch/sh/kernel/vmlinux_32.lds.S |    7 ++++-
 arch/sh/mm/Kconfig              |   32 ++++++++++++++++++++++++++-
 arch/sh/mm/Makefile_32          |    1 +
 arch/sh/mm/fixed_pmb.c          |   45 +++++++++++++++++++++++++++++++++++++++
 arch/sh/mm/ioremap_32.c         |    6 +++-
 9 files changed, 110 insertions(+), 11 deletions(-)
 create mode 100644 arch/sh/mm/fixed_pmb.c

diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index c16ccd4..d7ff2af 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -34,8 +34,10 @@ $(obj)/compressed/vmlinux: FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
 ifeq ($(CONFIG_32BIT),y)
+ifeq ($(CONFIG_FIXED_PMB),y)
 KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
 	$$[$(CONFIG_PAGE_OFFSET) + \
+	   ($(CONFIG_MEMORY_START) & 0x1fffffff) + \
 	   $(CONFIG_ZERO_PAGE_OFFSET)]')
 else
 KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
@@ -43,11 +45,24 @@ KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
 	   $(CONFIG_MEMORY_START) + \
 	   $(CONFIG_ZERO_PAGE_OFFSET)]')
 endif
+else
+KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
+	$$[$(CONFIG_PAGE_OFFSET) + \
+	   $(CONFIG_MEMORY_START) + \
+	   $(CONFIG_ZERO_PAGE_OFFSET)]')
+endif
 
+ifeq ($(CONFIG_FIXED_PMB),y)
+KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \
+	$$[$(CONFIG_PAGE_OFFSET) + \
+	   ($(CONFIG_MEMORY_START) & 0x1fffffff) + \
+	   $(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]')
+else
 KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \
 	$$[$(CONFIG_PAGE_OFFSET) + \
 	   $(CONFIG_MEMORY_START) + \
 	   $(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]')
+endif
 
 quiet_cmd_uimage = UIMAGE  $@
       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sh -O linux -T kernel \
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 36736c7..33fafe4 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -31,7 +31,7 @@
 /* Returns the physical address of a PnSEG (n=1,2) address */
 #define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
 
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || defined(CONFIG_FIXED_PMB)
 /*
  * Map an address to a certain privileged segment
  */
@@ -43,7 +43,7 @@
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
 #define P4SEGADDR(a)	\
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT */
+#endif /* 29BIT || FIXED_PMB */
 #endif /* P1SEG */
 
 /* Check if an address can be reached in 29 bits */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 61f6dae..79794ad 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -238,7 +238,7 @@ extern void onchip_unmap(unsigned long vaddr);
 static inline void __iomem *
 __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_FIXED_PMB)
 	unsigned long last_addr = offset + size - 1;
 #endif
 	void __iomem *ret;
@@ -247,7 +247,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_FIXED_PMB)
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
 	 * mapped. Uncached access for P1 addresses are done through P2.
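The same arithmetic drives the KERNEL_LOAD/KERNEL_ENTRY values computed in
the boot Makefile above. A quick standalone check (not part of the patch),
again with purely hypothetical config values, CONFIG_ENTRY_OFFSET included
for completeness:

#include <stdio.h>

/* Hypothetical config values, for illustration only. */
#define CONFIG_PAGE_OFFSET      0x80000000UL
#define CONFIG_MEMORY_START     0x48000000UL	/* 32-bit physical RAM base */
#define CONFIG_ZERO_PAGE_OFFSET 0x00001000UL
#define CONFIG_ENTRY_OFFSET     0x00001000UL

int main(void)
{
	/* CONFIG_FIXED_PMB=y: only the low 29 bits of MEMORY_START count,
	 * because the boot-loader-programmed PMB already maps P1/P2 onto
	 * the 32-bit RAM base. */
	unsigned long load = CONFIG_PAGE_OFFSET +
			     (CONFIG_MEMORY_START & 0x1fffffffUL) +
			     CONFIG_ZERO_PAGE_OFFSET;
	unsigned long entry = load + CONFIG_ENTRY_OFFSET;

	printf("KERNEL_LOAD  = 0x%08lx\n", load);	/* 0x88001000 */
	printf("KERNEL_ENTRY = 0x%08lx\n", entry);	/* 0x88002000 */
	return 0;
}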
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 5871d78..bcffc46 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -129,7 +129,12 @@ typedef struct page *pgtable_t;
  * is not visible (it is part of the PMB mapping) and so needs to be
  * added or subtracted as required.
  */
-#ifdef CONFIG_32BIT
+#if defined(CONFIG_FIXED_PMB)
+/* phys = virt - (PAGE_OFFSET - (__MEMORY_START & 0xe0000000)) */
+#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
+#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
+#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
+#elif defined(CONFIG_32BIT)
 #define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
 #else
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
index 7b4b82b..69fe954 100644
--- a/arch/sh/kernel/vmlinux_32.lds.S
+++ b/arch/sh/kernel/vmlinux_32.lds.S
@@ -15,8 +15,11 @@ OUTPUT_ARCH(sh)
 ENTRY(_start)
 SECTIONS
 {
-#ifdef CONFIG_32BIT
-	. = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+#ifdef CONFIG_FIXED_PMB
+	. = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
+	    CONFIG_ZERO_PAGE_OFFSET;
+#elif defined(CONFIG_32BIT)
+	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
 #else
 	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
 #endif
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 555ec97..8e1b6cf 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -57,8 +57,8 @@ config 32BIT
 	bool
 	default y if CPU_SH5
 
-config PMB
-	bool "Support 32-bit physical addressing through PMB"
+config PMB_ENABLE
+	bool "Support 32-bit physical addressing through PMB"
 	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
 	default y
@@ -67,6 +67,34 @@
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+choice
+	prompt "PMB handling type"
+	depends on PMB_ENABLE
+	default FIXED_PMB
+
+config PMB
+	bool "PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If you say Y here, physical addressing will be extended to
+	  32-bits through the SH-4A PMB. If this is not set, legacy
+	  29-bit physical addressing will be used.
+
+config FIXED_PMB
+	bool "fixed PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \
+		   CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If this option is enabled, the PMB mappings are fixed (except for
+	  the cache type setting) and must be set up by the boot loader
+	  beforehand. The P1/P2 area is handled like 29-bit physical
+	  addressing, and local bus devices are assigned to the P3 area,
+	  so at most 512 MB of system memory can be used.
+
+endchoice
+
 config X2TLB
 	bool "Enable extended TLB mode"
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index cb2f3f2..48606d9 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -35,6 +35,7 @@ endif
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
+obj-$(CONFIG_FIXED_PMB)		+= fixed_pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/mm/fixed_pmb.c b/arch/sh/mm/fixed_pmb.c
new file mode 100644
index 0000000..43c8eac
--- /dev/null
+++ b/arch/sh/mm/fixed_pmb.c
@@ -0,0 +1,45 @@
+/*
+ * arch/sh/mm/fixed_pmb.c
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+
+static int __uses_jump_to_uncached fixed_pmb_init(void)
+{
+	int i;
+	unsigned long addr, data;
+
+	jump_to_uncached();
+
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		addr = PMB_DATA + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+		if (!(data & PMB_V))
+			continue;
+
+		if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data &= ~PMB_WT;
+#else
+			data &= ~(PMB_C | PMB_WT);
+#endif
+		}
+		ctrl_outl(data, addr);
+	}
+
+	back_to_cached();
+
+	return 0;
+}
+arch_initcall(fixed_pmb_init);
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 32946fb..0e337d2 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -59,11 +59,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
 		return (void __iomem *)phys_addr;
 
+#if !defined(CONFIG_FIXED_PMB)
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
 	if (phys_addr < virt_to_phys(high_memory))
 		return NULL;
+#endif
 
 	/*
 	 * Mappings have to be page-aligned
@@ -81,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	orig_addr = addr = (unsigned long)area->addr;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * First try to remap through the PMB once a valid VMA has been
 	 * established. Smaller allocations (or the rest of the size
@@ -122,7 +124,7 @@ void __iounmap(void __iomem *addr)
 	if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
 		return;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * Purge any PMB entries that may have been established for this
 	 * mapping, then proceed with conventional VMA teardown.
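When bringing up a board that relies on boot-loader-programmed PMB entries,
a throwaway dump helper along these lines can make it easier to see what
fixed_pmb_init() will be working with. This is only a sketch, not part of
the patch: it reuses PMB_DATA, PMB_E_SHIFT, PMB_ENTRY_MAX and PMB_V exactly
as the code above does, and additionally assumes the PMB_ADDR address-array
base from <asm/mmu.h>, which this patch never touches.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/mmu.h>

/* Dump the PMB entries left behind by the boot loader (debug aid only).
 * PMB_ADDR is assumed to be the address-array counterpart of PMB_DATA. */
static int __init fixed_pmb_dump(void)
{
	int i;

	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long data = ctrl_inl(PMB_DATA + (i << PMB_E_SHIFT));
		unsigned long addr = ctrl_inl(PMB_ADDR + (i << PMB_E_SHIFT));

		if (!(data & PMB_V))
			continue;

		printk(KERN_DEBUG "PMB[%02d]: addr=%08lx data=%08lx%s\n",
		       i, addr, data, (data & PMB_C) ? " (cached)" : "");
	}
	return 0;
}
late_initcall(fixed_pmb_dump);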