[5/6] arm64: Implement pmem API support

Message ID ba8c28bcffc3bc856163441793bb4ef0fefa0f75.1500397441.git.robin.murphy@arm.com (mailing list archive)
State New, archived

Commit Message

Robin Murphy July 25, 2017, 10:55 a.m. UTC
Add a clean-to-point-of-persistence cache maintenance helper, and wire
up the basic architectural support for the pmem driver based on it.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
 arch/arm64/Kconfig                  | 11 +++++++++++
 arch/arm64/include/asm/assembler.h  |  6 ++++++
 arch/arm64/include/asm/cacheflush.h |  1 +
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/kernel/cpufeature.c      | 11 +++++++++++
 arch/arm64/mm/cache.S               | 14 ++++++++++++++
 arch/arm64/mm/pageattr.c            | 18 ++++++++++++++++++
 7 files changed, 63 insertions(+), 1 deletion(-)
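
For context, a minimal sketch of how a caller might use the helper this
patch wires up (hypothetical function and caller names; the real
consumers are the generic pmem/DAX code, whose header plumbing is
omitted here):

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical write path for a cacheable persistent-memory mapping */
static void pmem_write_durable(void *pmem_dst, const void *src, size_t len)
{
	memcpy(pmem_dst, src, len);
	/* clean the newly written lines to the Point of Persistence */
	arch_wb_cache_pmem(pmem_dst, len);
}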

Comments

Catalin Marinas Aug. 4, 2017, 3:25 p.m. UTC | #1
Two minor comments below.

On Tue, Jul 25, 2017 at 11:55:42AM +0100, Robin Murphy wrote:
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -960,6 +960,17 @@ config ARM64_UAO
>  	  regular load/store instructions if the cpu does not implement the
>  	  feature.
>  
> +config ARM64_PMEM
> +	bool "Enable support for persistent memory"
> +	select ARCH_HAS_PMEM_API
> +	help
> +	  Say Y to enable support for the persistent memory API based on the
> +	  ARMv8.2 DCPoP feature.
> +
> +	  The feature is detected at runtime, and the kernel will use DC CVAC
> +	  operations if DC CVAP is not supported (following the behaviour of
> +	  DC CVAP itself if the system does not define a point of persistence).

Any reason not to have this default y?

> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -172,6 +172,20 @@ ENDPIPROC(__clean_dcache_area_poc)
>  ENDPROC(__dma_clean_area)
>  
>  /*
> + *	__clean_dcache_area_pop(kaddr, size)
> + *
> + * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
> + * 	are cleaned to the PoP.
> + *
> + *	- kaddr   - kernel address
> + *	- size    - size in question
> + */
> +ENTRY(__clean_dcache_area_pop)
> +	dcache_by_line_op cvap, sy, x0, x1, x2, x3
> +	ret
> +ENDPIPROC(__clean_dcache_area_pop)
> +
> +/*
>   *	__dma_flush_area(start, size)
>   *
>   *	clean & invalidate D / U line
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index a682a0a2a0fa..a461a00ceb3e 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -183,3 +183,21 @@ bool kernel_page_present(struct page *page)
>  }
>  #endif /* CONFIG_HIBERNATION */
>  #endif /* CONFIG_DEBUG_PAGEALLOC */
> +
> +#ifdef CONFIG_ARCH_HAS_PMEM_API
> +#include <asm/cacheflush.h>
> +
> +static inline void arch_wb_cache_pmem(void *addr, size_t size)
> +{
> +	/* Ensure order against any prior non-cacheable writes */
> +	dmb(sy);
> +	__clean_dcache_area_pop(addr, size);
> +}

Could we keep the dmb() in the actual __clean_dcache_area_pop()
implementation?

I can do the changes myself if you don't have any objections.
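
For illustration, if the barrier did move into the assembly routine, the
C wrapper in pageattr.c would reduce to a plain call; a sketch under
that assumption (not what this version of the patch does):

#include <asm/cacheflush.h>

static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* ordering barrier assumed to live inside __clean_dcache_area_pop() */
	__clean_dcache_area_pop(addr, size);
}
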
Robin Murphy Aug. 4, 2017, 5:43 p.m. UTC | #2
On 04/08/17 16:25, Catalin Marinas wrote:
> Two minor comments below.
> 
> On Tue, Jul 25, 2017 at 11:55:42AM +0100, Robin Murphy wrote:
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
>> @@ -960,6 +960,17 @@ config ARM64_UAO
>>  	  regular load/store instructions if the cpu does not implement the
>>  	  feature.
>>  
>> +config ARM64_PMEM
>> +	bool "Enable support for persistent memory"
>> +	select ARCH_HAS_PMEM_API
>> +	help
>> +	  Say Y to enable support for the persistent memory API based on the
>> +	  ARMv8.2 DCPoP feature.
>> +
>> +	  The feature is detected at runtime, and the kernel will use DC CVAC
>> +	  operations if DC CVAP is not supported (following the behaviour of
>> +	  DC CVAP itself if the system does not define a point of persistence).
> 
> Any reason not to have this default y?

Mostly because it's untested, and not actually useful without some way
of describing persistent memory regions to the kernel (I'm currently
trying to make sense of what exactly ARCH_HAS_MMIO_FLUSH is supposed to
mean in order to enable ACPI NFIT support).

There's also the potential issue that we can't disable ARCH_HAS_PMEM_API
at runtime on pre-v8.2 systems where DC CVAC may not strictly give the
guarantee of persistence that the API is supposed to imply. However, I
guess that's more of an open problem, since even on a v8.2 CPU reporting
(mandatory) DC CVAP support we've still no way to actually know whether
the interconnect/memory controller/etc. of any old system is up to the
job. At this point I'm mostly hoping that people will only be sticking
NVDIMMs into systems that *are* properly designed to support them, v8.2
CPUs or not.

>> --- a/arch/arm64/mm/cache.S
>> +++ b/arch/arm64/mm/cache.S
>> @@ -172,6 +172,20 @@ ENDPIPROC(__clean_dcache_area_poc)
>>  ENDPROC(__dma_clean_area)
>>  
>>  /*
>> + *	__clean_dcache_area_pop(kaddr, size)
>> + *
>> + * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
>> + * 	are cleaned to the PoP.
>> + *
>> + *	- kaddr   - kernel address
>> + *	- size    - size in question
>> + */
>> +ENTRY(__clean_dcache_area_pop)
>> +	dcache_by_line_op cvap, sy, x0, x1, x2, x3
>> +	ret
>> +ENDPIPROC(__clean_dcache_area_pop)
>> +
>> +/*
>>   *	__dma_flush_area(start, size)
>>   *
>>   *	clean & invalidate D / U line
>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>> index a682a0a2a0fa..a461a00ceb3e 100644
>> --- a/arch/arm64/mm/pageattr.c
>> +++ b/arch/arm64/mm/pageattr.c
>> @@ -183,3 +183,21 @@ bool kernel_page_present(struct page *page)
>>  }
>>  #endif /* CONFIG_HIBERNATION */
>>  #endif /* CONFIG_DEBUG_PAGEALLOC */
>> +
>> +#ifdef CONFIG_ARCH_HAS_PMEM_API
>> +#include <asm/cacheflush.h>
>> +
>> +static inline void arch_wb_cache_pmem(void *addr, size_t size)
>> +{
>> +	/* Ensure order against any prior non-cacheable writes */
>> +	dmb(sy);
>> +	__clean_dcache_area_pop(addr, size);
>> +}
> 
> Could we keep the dmb() in the actual __clean_dcache_area_pop()
> implementation?

Mark held the opinion that it should follow the same pattern as the
other cache maintenance primitives - e.g. we don't have such a dmb in
__inval_cache_range(), but do place one at callsites where we know it
may be necessary (head.S) - and I found it hard to disagree. The callers
in patch #6 should never need a barrier, and arguably we may not even
need this one, since it looks like pmem should currently always be
mapped as MEMREMAP_WB if ARCH_HAS_PMEM_API.

> I can do the changes myself if you don't have any objections.

If you would prefer to play safe and move it back into the assembly
that's fine by me, but note that the associated comments in patch #6
should also be removed if so.

Robin.
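
For illustration, a C-level sketch of the callsite convention described
above (hypothetical caller names; the real examples are head.S and the
callers added in patch #6):

#include <asm/barrier.h>
#include <asm/cacheflush.h>

static void clean_after_noncacheable_writes(void *buf, size_t len)
{
	/* prior writes went through a non-cacheable alias, so order them
	 * before the maintenance operations */
	dmb(sy);
	__clean_dcache_area_pop(buf, len);
}

static void clean_after_cacheable_writes(void *buf, size_t len)
{
	/* writes went through the cacheable mapping; the DC instructions
	 * alone are sufficient and no extra barrier is needed */
	__clean_dcache_area_pop(buf, len);
}
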
Dan Williams Aug. 4, 2017, 6:09 p.m. UTC | #3
On Fri, Aug 4, 2017 at 10:43 AM, Robin Murphy <robin.murphy@arm.com> wrote:
> On 04/08/17 16:25, Catalin Marinas wrote:
>> Two minor comments below.
>>
>> On Tue, Jul 25, 2017 at 11:55:42AM +0100, Robin Murphy wrote:
>>> --- a/arch/arm64/Kconfig
>>> +++ b/arch/arm64/Kconfig
>>> @@ -960,6 +960,17 @@ config ARM64_UAO
>>>        regular load/store instructions if the cpu does not implement the
>>>        feature.
>>>
>>> +config ARM64_PMEM
>>> +    bool "Enable support for persistent memory"
>>> +    select ARCH_HAS_PMEM_API
>>> +    help
>>> +      Say Y to enable support for the persistent memory API based on the
>>> +      ARMv8.2 DCPoP feature.
>>> +
>>> +      The feature is detected at runtime, and the kernel will use DC CVAC
>>> +      operations if DC CVAP is not supported (following the behaviour of
>>> +      DC CVAP itself if the system does not define a point of persistence).
>>
>> Any reason not to have this default y?
>
> Mostly because it's untested, and not actually useful without some way
> of describing persistent memory regions to the kernel (I'm currently
> trying to make sense of what exactly ARCH_HAS_MMIO_FLUSH is supposed to
> mean in order to enable ACPI NFIT support).

This is related to block-aperture support described by the NFIT where
a sliding-memory-mapped window can be programmed to access different
ranges of the NVDIMM. Before the window is programmed to a new
DIMM-address we need to flush any dirty data through the current
window setting to media. See the call to mmio_flush_range() in
acpi_nfit_blk_single_io(). I think it's ok to omit ARCH_HAS_MMIO_FLUSH
support, and add a configuration option to compile out the
block-aperture support.
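
A rough sketch of the sequence described above (simplified and
hypothetical: program_block_window() is an invented stand-in for the
NFIT control-register writes, error handling is omitted, and an arm64
mmio_flush_range() does not exist yet - providing one is exactly the
open question here):

#include <linux/io.h>
#include <asm/cacheflush.h>

static int blk_window_io(void __iomem *window, u64 new_dimm_addr,
			 void *buf, size_t len, bool write)
{
	/*
	 * Flush dirty data through the *current* window setting first,
	 * so it reaches media before the window points elsewhere.
	 */
	mmio_flush_range((void __force *)window, len);

	program_block_window(window, new_dimm_addr);	/* hypothetical */

	if (write)
		memcpy_toio(window, buf, len);
	else
		memcpy_fromio(buf, window, len);
	return 0;
}
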
Robin Murphy Aug. 4, 2017, 6:35 p.m. UTC | #4
On 04/08/17 19:09, Dan Williams wrote:
> On Fri, Aug 4, 2017 at 10:43 AM, Robin Murphy <robin.murphy@arm.com> wrote:
>> On 04/08/17 16:25, Catalin Marinas wrote:
>>> Two minor comments below.
>>>
>>> On Tue, Jul 25, 2017 at 11:55:42AM +0100, Robin Murphy wrote:
>>>> --- a/arch/arm64/Kconfig
>>>> +++ b/arch/arm64/Kconfig
>>>> @@ -960,6 +960,17 @@ config ARM64_UAO
>>>>        regular load/store instructions if the cpu does not implement the
>>>>        feature.
>>>>
>>>> +config ARM64_PMEM
>>>> +    bool "Enable support for persistent memory"
>>>> +    select ARCH_HAS_PMEM_API
>>>> +    help
>>>> +      Say Y to enable support for the persistent memory API based on the
>>>> +      ARMv8.2 DCPoP feature.
>>>> +
>>>> +      The feature is detected at runtime, and the kernel will use DC CVAC
>>>> +      operations if DC CVAP is not supported (following the behaviour of
>>>> +      DC CVAP itself if the system does not define a point of persistence).
>>>
>>> Any reason not to have this default y?
>>
>> Mostly because it's untested, and not actually useful without some way
>> of describing persistent memory regions to the kernel (I'm currently
>> trying to make sense of what exactly ARCH_HAS_MMIO_FLUSH is supposed to
>> mean in order to enable ACPI NFIT support).
> 
> This is related to block-aperture support described by the NFIT where
> a sliding-memory-mapped window can be programmed to access different
> ranges of the NVDIMM. Before the window is programmed to a new
> DIMM-address we need to flush any dirty data through the current
> window setting to media. See the call to mmio_flush_range() in
> acpi_nfit_blk_single_io(). I think it's ok to omit ARCH_HAS_MMIO_FLUSH
> support, and add a configuration option to compile out the
> block-aperture support.

Oh, I have every intention of implementing it one way or another if
necessary - it's not difficult, it's just been a question of working
through the NFIT code to figure out the subtleties of translation to
arm64 ;)

If mmio_flush_range() is for true MMIO (i.e. __iomem) mappings, then
arm64 should only need a barrier, rather than actual cache operations.
If on the other hand it's misleadingly named and only actually used on
MEMREMAP_WB mappings (as I'm starting to think it might be), then I can't
help thinking it could simply go away in favour of arch_wb_cache_pmem(), since
that now seems to have those same semantics and intent, plus a much more
appropriate name.

Robin.
Dan Williams Aug. 4, 2017, 7:36 p.m. UTC | #5
On Fri, Aug 4, 2017 at 11:35 AM, Robin Murphy <robin.murphy@arm.com> wrote:
> On 04/08/17 19:09, Dan Williams wrote:
>> On Fri, Aug 4, 2017 at 10:43 AM, Robin Murphy <robin.murphy@arm.com> wrote:
>>> On 04/08/17 16:25, Catalin Marinas wrote:
>>>> Two minor comments below.
>>>>
>>>> On Tue, Jul 25, 2017 at 11:55:42AM +0100, Robin Murphy wrote:
>>>>> --- a/arch/arm64/Kconfig
>>>>> +++ b/arch/arm64/Kconfig
>>>>> @@ -960,6 +960,17 @@ config ARM64_UAO
>>>>>        regular load/store instructions if the cpu does not implement the
>>>>>        feature.
>>>>>
>>>>> +config ARM64_PMEM
>>>>> +    bool "Enable support for persistent memory"
>>>>> +    select ARCH_HAS_PMEM_API
>>>>> +    help
>>>>> +      Say Y to enable support for the persistent memory API based on the
>>>>> +      ARMv8.2 DCPoP feature.
>>>>> +
>>>>> +      The feature is detected at runtime, and the kernel will use DC CVAC
>>>>> +      operations if DC CVAP is not supported (following the behaviour of
>>>>> +      DC CVAP itself if the system does not define a point of persistence).
>>>>
>>>> Any reason not to have this default y?
>>>
>>> Mostly because it's untested, and not actually useful without some way
>>> of describing persistent memory regions to the kernel (I'm currently
>>> trying to make sense of what exactly ARCH_HAS_MMIO_FLUSH is supposed to
>>> mean in order to enable ACPI NFIT support).
>>
>> This is related to block-aperture support described by the NFIT where
>> a sliding-memory-mapped window can be programmed to access different
>> ranges of the NVDIMM. Before the window is programmed to a new
>> DIMM-address we need to flush any dirty data through the current
>> window setting to media. See the call to mmio_flush_range() in
>> acpi_nfit_blk_single_io(). I think it's ok to omit ARCH_HAS_MMIO_FLUSH
>> support, and add a configuration option to compile out the
>> block-aperture support.
>
> Oh, I have every intention of implementing it one way or another if
> necessary - it's not difficult, it's just been a question of working
> through the NFIT code to figure out the subtleties of translation to
> arm64 ;)
>
> If mmio_flush_range() is for true MMIO (i.e. __iomem) mappings, then
> arm64 should only need a barrier, rather than actual cache operations.
> If on the other hand it's misleadingly named and only actually used on
>> MEMREMAP_WB mappings (as I'm starting to think it might be), then I can't
>> help thinking it could simply go away in favour of arch_wb_cache_pmem(), since
> that now seems to have those same semantics and intent, plus a much more
> appropriate name.
>

The mapping type of block-apertures is up to the architecture, so you
could mark them uncacheable and not worry about mmio_flush_range().
Also, arch_wb_cache_pmem() is not a replacement for mmio_flush_range() since
we also need the cache to be invalidated. arch_wb_cache_pmem() is allowed to
leave clean cache lines present.
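
A sketch of that distinction using the existing arm64 helpers (the
exact routine choice here is an illustration, not part of this patch):

#include <asm/cacheflush.h>

static void durable_writeback(void *addr, size_t size)
{
	/* clean to PoP; clean lines may legitimately stay in the cache */
	__clean_dcache_area_pop(addr, size);
}

static void block_window_flush(void *addr, size_t size)
{
	/*
	 * A block-aperture flush must also discard the now-stale lines,
	 * i.e. clean + invalidate, so the next access through the
	 * re-programmed window fetches fresh data from media.
	 */
	__flush_dcache_area(addr, size);
}
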
Will Deacon Aug. 7, 2017, 6:33 p.m. UTC | #6
On Fri, Aug 04, 2017 at 04:25:42PM +0100, Catalin Marinas wrote:
> Two minor comments below.
> 
> On Tue, Jul 25, 2017 at 11:55:42AM +0100, Robin Murphy wrote:
> > --- a/arch/arm64/Kconfig
> > +++ b/arch/arm64/Kconfig
> > @@ -960,6 +960,17 @@ config ARM64_UAO
> >  	  regular load/store instructions if the cpu does not implement the
> >  	  feature.
> >  
> > +config ARM64_PMEM
> > +	bool "Enable support for persistent memory"
> > +	select ARCH_HAS_PMEM_API
> > +	help
> > +	  Say Y to enable support for the persistent memory API based on the
> > +	  ARMv8.2 DCPoP feature.
> > +
> > +	  The feature is detected at runtime, and the kernel will use DC CVAC
> > +	  operations if DC CVAP is not supported (following the behaviour of
> > +	  DC CVAP itself if the system does not define a point of persistence).
> 
> Any reason not to have this default y?
> 
> > --- a/arch/arm64/mm/cache.S
> > +++ b/arch/arm64/mm/cache.S
> > @@ -172,6 +172,20 @@ ENDPIPROC(__clean_dcache_area_poc)
> >  ENDPROC(__dma_clean_area)
> >  
> >  /*
> > + *	__clean_dcache_area_pop(kaddr, size)
> > + *
> > + * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
> > + * 	are cleaned to the PoP.
> > + *
> > + *	- kaddr   - kernel address
> > + *	- size    - size in question
> > + */
> > +ENTRY(__clean_dcache_area_pop)
> > +	dcache_by_line_op cvap, sy, x0, x1, x2, x3
> > +	ret
> > +ENDPIPROC(__clean_dcache_area_pop)
> > +
> > +/*
> >   *	__dma_flush_area(start, size)
> >   *
> >   *	clean & invalidate D / U line
> > diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> > index a682a0a2a0fa..a461a00ceb3e 100644
> > --- a/arch/arm64/mm/pageattr.c
> > +++ b/arch/arm64/mm/pageattr.c
> > @@ -183,3 +183,21 @@ bool kernel_page_present(struct page *page)
> >  }
> >  #endif /* CONFIG_HIBERNATION */
> >  #endif /* CONFIG_DEBUG_PAGEALLOC */
> > +
> > +#ifdef CONFIG_ARCH_HAS_PMEM_API
> > +#include <asm/cacheflush.h>
> > +
> > +static inline void arch_wb_cache_pmem(void *addr, size_t size)
> > +{
> > +	/* Ensure order against any prior non-cacheable writes */
> > +	dmb(sy);
> > +	__clean_dcache_area_pop(addr, size);
> > +}
> 
> Could we keep the dmb() in the actual __clean_dcache_area_pop()
> implementation?
> 
> I can do the changes myself if you don't have any objections.

I *think* the DMB can also be reworked to use the outer-shareable domain,
much as we do for the dma_* barriers.

Will
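
A sketch of the variant Will suggests, assuming an outer-shareable,
store-only barrier along the lines of dma_wmb() would be sufficient
here (an assumption; not necessarily what was merged):

#include <asm/barrier.h>
#include <asm/cacheflush.h>

static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* order prior non-cacheable writes, outer-shareable domain only */
	dmb(oshst);
	__clean_dcache_area_pop(addr, size);
}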

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dfd908630631..0b0576a54724 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -960,6 +960,17 @@  config ARM64_UAO
 	  regular load/store instructions if the cpu does not implement the
 	  feature.
 
+config ARM64_PMEM
+	bool "Enable support for persistent memory"
+	select ARCH_HAS_PMEM_API
+	help
+	  Say Y to enable support for the persistent memory API based on the
+	  ARMv8.2 DCPoP feature.
+
+	  The feature is detected at runtime, and the kernel will use DC CVAC
+	  operations if DC CVAP is not supported (following the behaviour of
+	  DC CVAP itself if the system does not define a point of persistence).
+
 endmenu
 
 config ARM64_MODULE_CMODEL_LARGE
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1b67c3782d00..5d8903c45031 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -353,6 +353,12 @@  alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
 alternative_else
 	dc	civac, \kaddr
 alternative_endif
+	.elseif	(\op == cvap)
+alternative_if ARM64_HAS_DCPOP
+	sys 3, c7, c12, 1, \kaddr	// dc cvap
+alternative_else
+	dc	cvac, \kaddr
+alternative_endif
 	.else
 	dc	\op, \kaddr
 	.endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index b4b43a94dffd..76d1cc85d5b1 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -69,6 +69,7 @@  extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
+extern void __clean_dcache_area_pop(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8d2272c6822c..8da621627d7c 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -39,7 +39,8 @@ 
 #define ARM64_WORKAROUND_QCOM_FALKOR_E1003	18
 #define ARM64_WORKAROUND_858921			19
 #define ARM64_WORKAROUND_CAVIUM_30115		20
+#define ARM64_HAS_DCPOP				21
 
-#define ARM64_NCAPS				21
+#define ARM64_NCAPS				22
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a2542ef3ff25..cd52d365d1f0 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -889,6 +889,17 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = 0,
 		.matches = has_no_fpsimd,
 	},
+#ifdef CONFIG_ARM64_PMEM
+	{
+		.desc = "Data cache clean to Point of Persistence",
+		.capability = ARM64_HAS_DCPOP,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR1_EL1,
+		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
+		.min_field_value = 1,
+	},
+#endif
 	{},
 };
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index ed47fbbb4b05..7f1dbe962cf5 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -172,6 +172,20 @@  ENDPIPROC(__clean_dcache_area_poc)
 ENDPROC(__dma_clean_area)
 
 /*
+ *	__clean_dcache_area_pop(kaddr, size)
+ *
+ * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * 	are cleaned to the PoP.
+ *
+ *	- kaddr   - kernel address
+ *	- size    - size in question
+ */
+ENTRY(__clean_dcache_area_pop)
+	dcache_by_line_op cvap, sy, x0, x1, x2, x3
+	ret
+ENDPIPROC(__clean_dcache_area_pop)
+
+/*
  *	__dma_flush_area(start, size)
  *
  *	clean & invalidate D / U line
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a682a0a2a0fa..a461a00ceb3e 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -183,3 +183,21 @@  bool kernel_page_present(struct page *page)
 }
 #endif /* CONFIG_HIBERNATION */
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#include <asm/cacheflush.h>
+
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+	/* Ensure order against any prior non-cacheable writes */
+	dmb(sy);
+	__clean_dcache_area_pop(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+	__inval_dcache_area(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+#endif