Message ID | 418c69fb675136a4768c3ef45521ad70131645c1.1499586046.git.kai.huang@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Hi, On 07/09/2017 09:10 AM, Kai Huang wrote: > Currently Xen only has non-cacheable version of ioremap. Although EPC is > reported as reserved memory in e820 but it can be mapped as cacheable. This > patch adds ioremap_cache (cacheable version of ioremap). > > Signed-off-by: Kai Huang <kai.huang@linux.intel.com> > --- > xen/arch/x86/mm.c | 15 +++++++++++++-- > xen/include/xen/vmap.h | 1 + First of all, this is common code and the "REST" maintainers should have been CCed for this include. But xen/include/xen/vmap.h is common code and going to break ARM. We already have an inline implementation of ioremap_nocache. You should move the definition in x86 specific headers. Please make sure to at least build test ARM when touching common code. Cheers, > 2 files changed, 14 insertions(+), 2 deletions(-) > > diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c > index 101ab33193..d0b6b3a247 100644 > --- a/xen/arch/x86/mm.c > +++ b/xen/arch/x86/mm.c > @@ -6284,9 +6284,10 @@ void *__init arch_vmap_virt_end(void) > return (void *)fix_to_virt(__end_of_fixed_addresses); > } > > -void __iomem *ioremap(paddr_t pa, size_t len) > +static void __iomem *__ioremap(paddr_t pa, size_t len, bool_t cache) > { > mfn_t mfn = _mfn(PFN_DOWN(pa)); > + unsigned int flags = cache ? 
PAGE_HYPERVISOR : PAGE_HYPERVISOR_NOCACHE; > void *va; > > WARN_ON(page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL)); > @@ -6299,12 +6300,22 @@ void __iomem *ioremap(paddr_t pa, size_t len) > unsigned int offs = pa & (PAGE_SIZE - 1); > unsigned int nr = PFN_UP(offs + len); > > - va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_NOCACHE, VMAP_DEFAULT) + offs; > + va = __vmap(&mfn, nr, 1, 1, flags, VMAP_DEFAULT) + offs; > } > > return (void __force __iomem *)va; > } > > +void __iomem *ioremap(paddr_t pa, size_t len) > +{ > + return __ioremap(pa, len, false); > +} > + > +void __iomem *ioremap_cache(paddr_t pa, size_t len) > +{ > + return __ioremap(pa, len, true); > +} > + > int create_perdomain_mapping(struct domain *d, unsigned long va, > unsigned int nr, l1_pgentry_t **pl1tab, > struct page_info **ppg) > diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h > index 369560e620..f6037e368c 100644 > --- a/xen/include/xen/vmap.h > +++ b/xen/include/xen/vmap.h > @@ -24,6 +24,7 @@ void *vzalloc(size_t size); > void vfree(void *va); > > void __iomem *ioremap(paddr_t, size_t); > +void __iomem *ioremap_cache(paddr_t, size_t); > > static inline void iounmap(void __iomem *va) > { >
Hi Julien, Thanks for pointing out. I'll move to x86 specific. I've cc-ed all maintainers reported by ./scripts/get_maintainer.pl, looks this script doesn't report all maintainers. Sorry. I'll add ARM maintainers next time. Thanks, -Kai On 7/12/2017 8:14 AM, Julien Grall wrote: > Hi, > > On 07/09/2017 09:10 AM, Kai Huang wrote: >> Currently Xen only has non-cacheable version of ioremap. Although EPC is >> reported as reserved memory in e820 but it can be mapped as cacheable. >> This >> patch adds ioremap_cache (cacheable version of ioremap). >> >> Signed-off-by: Kai Huang <kai.huang@linux.intel.com> >> --- >> xen/arch/x86/mm.c | 15 +++++++++++++-- >> xen/include/xen/vmap.h | 1 + > > First of all, this is common code and the "REST" maintainers should have > been CCed for this include. > > But xen/include/xen/vmap.h is common code and going to break ARM. We > already have an inline implementation of ioremap_nocache. You should > move the definition in x86 specific headers. > > Please make sure to at least build test ARM when touching common code. > > Cheers, > >> 2 files changed, 14 insertions(+), 2 deletions(-) >> >> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c >> index 101ab33193..d0b6b3a247 100644 >> --- a/xen/arch/x86/mm.c >> +++ b/xen/arch/x86/mm.c >> @@ -6284,9 +6284,10 @@ void *__init arch_vmap_virt_end(void) >> return (void *)fix_to_virt(__end_of_fixed_addresses); >> } >> -void __iomem *ioremap(paddr_t pa, size_t len) >> +static void __iomem *__ioremap(paddr_t pa, size_t len, bool_t cache) >> { >> mfn_t mfn = _mfn(PFN_DOWN(pa)); >> + unsigned int flags = cache ? 
PAGE_HYPERVISOR : >> PAGE_HYPERVISOR_NOCACHE; >> void *va; >> WARN_ON(page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL)); >> @@ -6299,12 +6300,22 @@ void __iomem *ioremap(paddr_t pa, size_t len) >> unsigned int offs = pa & (PAGE_SIZE - 1); >> unsigned int nr = PFN_UP(offs + len); >> - va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_NOCACHE, >> VMAP_DEFAULT) + offs; >> + va = __vmap(&mfn, nr, 1, 1, flags, VMAP_DEFAULT) + offs; >> } >> return (void __force __iomem *)va; >> } >> +void __iomem *ioremap(paddr_t pa, size_t len) >> +{ >> + return __ioremap(pa, len, false); >> +} >> + >> +void __iomem *ioremap_cache(paddr_t pa, size_t len) >> +{ >> + return __ioremap(pa, len, true); >> +} >> + >> int create_perdomain_mapping(struct domain *d, unsigned long va, >> unsigned int nr, l1_pgentry_t **pl1tab, >> struct page_info **ppg) >> diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h >> index 369560e620..f6037e368c 100644 >> --- a/xen/include/xen/vmap.h >> +++ b/xen/include/xen/vmap.h >> @@ -24,6 +24,7 @@ void *vzalloc(size_t size); >> void vfree(void *va); >> void __iomem *ioremap(paddr_t, size_t); >> +void __iomem *ioremap_cache(paddr_t, size_t); >> static inline void iounmap(void __iomem *va) >> { >> >
>>> Julien Grall <julien.grall@arm.com> 07/11/17 10:15 PM >>> >On 07/09/2017 09:10 AM, Kai Huang wrote: >> Currently Xen only has non-cacheable version of ioremap. Although EPC is >> reported as reserved memory in e820 but it can be mapped as cacheable. This >> patch adds ioremap_cache (cacheable version of ioremap). >> >> Signed-off-by: Kai Huang <kai.huang@linux.intel.com> >> --- >> xen/arch/x86/mm.c | 15 +++++++++++++-- >> xen/include/xen/vmap.h | 1 + > >First of all, this is common code and the "REST" maintainers should have >been CCed for this include. > >But xen/include/xen/vmap.h is common code and going to break ARM. We >already have an inline implementation of ioremap_nocache. You should >move the definition in x86 specific headers. Indeed, plus the ARM implementation actually shows how this would better be done: Have a function allowing more than just true/false to be passed in, to eventually also allow having ioremap_wc() and alike as wrappers. As long as it's x86-specific I'd then also suggest calling the new wrapper function ioremap_wb() (as "cache" may also mean WT). Jan
On 07/12/2017 02:52 AM, Huang, Kai wrote: > Hi Julien, Hello Kai, Please avoid top-posting. > > Thanks for pointing out. I'll move to x86 specific. > > I've cc-ed all maintainers reported by ./scripts/get_maintainer.pl, > looks this script doesn't report all maintainers. Sorry. I'll add ARM > maintainers next time. I would always double check the result of scripts/get_maintainer.pl. I am aware of a bug in scripts/get_maintainers.pl where only maintainers of the specific component (here x86) are listed, even when you touch common code. In this case, I didn't ask to CC ARM maintainers, but CC the "THE REST" group (see MAINTAINERS). Cheers,
On 7/12/2017 6:17 PM, Jan Beulich wrote: >>>> Julien Grall <julien.grall@arm.com> 07/11/17 10:15 PM >>> >> On 07/09/2017 09:10 AM, Kai Huang wrote: >>> Currently Xen only has non-cacheable version of ioremap. Although EPC is >>> reported as reserved memory in e820 but it can be mapped as cacheable. This >>> patch adds ioremap_cache (cacheable version of ioremap). >>> >>> Signed-off-by: Kai Huang <kai.huang@linux.intel.com> >>> --- >>> xen/arch/x86/mm.c | 15 +++++++++++++-- >>> xen/include/xen/vmap.h | 1 + >> >> First of all, this is common code and the "REST" maintainers should have >> been CCed for this include. >> >> But xen/include/xen/vmap.h is common code and going to break ARM. We >> already have an inline implementation of ioremap_nocache. You should >> move the definition in x86 specific headers. > > Indeed, plus the ARM implementation actually shows how this would better > be done: Have a function allowing more than just true/false to be passed in, > to eventually also allow having ioremap_wc() and alike as wrappers. As long > as it's x86-specific I'd then also suggest calling the new wrapper function > ioremap_wb() (as "cache" may also mean WT). Hi Jan, Thanks for comments. I'll do as you suggested. Thanks, -Kai > > Jan > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > https://lists.xen.org/xen-devel >
On 7/12/2017 7:13 PM, Julien Grall wrote: > > > On 07/12/2017 02:52 AM, Huang, Kai wrote: >> Hi Julien, > > Hello Kai, > > Please avoid top-posting. Sorry. Will avoid in the future :) > >> >> Thanks for pointing out. I'll move to x86 specific. >> >> I've cc-ed all maintainers reported by ./scripts/get_maintainer.pl, >> looks this script doesn't report all maintainers. Sorry. I'll add ARM >> maintainers next time. > > I would always double check the result of scripts/get_maintainer.pl. I > am aware of a bug in scripts/get_maintainers.pl where only maintainer of > the specific component (here x86) are listed, even when you touch common > code. > > In this case, I didn't ask to CC ARM maintainers, but CC "THE REST" > group (see MAINTAINERS). Understood. I'll follow in the future. Thanks, -Kai > > Cheers, >
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 101ab33193..d0b6b3a247 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -6284,9 +6284,10 @@ void *__init arch_vmap_virt_end(void) return (void *)fix_to_virt(__end_of_fixed_addresses); } -void __iomem *ioremap(paddr_t pa, size_t len) +static void __iomem *__ioremap(paddr_t pa, size_t len, bool_t cache) { mfn_t mfn = _mfn(PFN_DOWN(pa)); + unsigned int flags = cache ? PAGE_HYPERVISOR : PAGE_HYPERVISOR_NOCACHE; void *va; WARN_ON(page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL)); @@ -6299,12 +6300,22 @@ void __iomem *ioremap(paddr_t pa, size_t len) unsigned int offs = pa & (PAGE_SIZE - 1); unsigned int nr = PFN_UP(offs + len); - va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_NOCACHE, VMAP_DEFAULT) + offs; + va = __vmap(&mfn, nr, 1, 1, flags, VMAP_DEFAULT) + offs; } return (void __force __iomem *)va; } +void __iomem *ioremap(paddr_t pa, size_t len) +{ + return __ioremap(pa, len, false); +} + +void __iomem *ioremap_cache(paddr_t pa, size_t len) +{ + return __ioremap(pa, len, true); +} + int create_perdomain_mapping(struct domain *d, unsigned long va, unsigned int nr, l1_pgentry_t **pl1tab, struct page_info **ppg) diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h index 369560e620..f6037e368c 100644 --- a/xen/include/xen/vmap.h +++ b/xen/include/xen/vmap.h @@ -24,6 +24,7 @@ void *vzalloc(size_t size); void vfree(void *va); void __iomem *ioremap(paddr_t, size_t); +void __iomem *ioremap_cache(paddr_t, size_t); static inline void iounmap(void __iomem *va) {
Currently Xen only has a non-cacheable version of ioremap. Although EPC is reported as reserved memory in e820, it can be mapped as cacheable. This patch adds ioremap_cache (a cacheable version of ioremap). Signed-off-by: Kai Huang <kai.huang@linux.intel.com> --- xen/arch/x86/mm.c | 15 +++++++++++++-- xen/include/xen/vmap.h | 1 + 2 files changed, 14 insertions(+), 2 deletions(-)