@@ -9,7 +9,7 @@
#include <asm/cacheflush.h>
-static inline void clean_pmem_range_isa310(unsigned long start, unsigned long stop)
+static inline void __clean_pmem_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
@@ -18,13 +18,22 @@ static inline void clean_pmem_range_isa310(unsigned long start, unsigned long stop)
	unsigned long i;
	for (i = 0; i < size >> shift; i++, addr += bytes)
-		asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory");
+		dcbf(addr);
+}
+static inline void __flush_pmem_range(unsigned long start, unsigned long stop)
+{
+	unsigned long shift = l1_dcache_shift();
+	unsigned long bytes = l1_dcache_bytes();
+	void *addr = (void *)(start & ~(bytes - 1));
+	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+	unsigned long i;
-	asm volatile(PPC_PHWSYNC ::: "memory");
+	for (i = 0; i < size >> shift; i++, addr += bytes)
+		dcbf(addr);
}
-static inline void flush_pmem_range_isa310(unsigned long start, unsigned long stop)
+static inline void clean_pmem_range_isa310(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
@@ -33,24 +42,33 @@ static inline void flush_pmem_range_isa310(unsigned long start, unsigned long stop)
	unsigned long i;
	for (i = 0; i < size >> shift; i++, addr += bytes)
-		asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory");
+		asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory");
+}
+static inline void flush_pmem_range_isa310(unsigned long start, unsigned long stop)
+{
+	unsigned long shift = l1_dcache_shift();
+	unsigned long bytes = l1_dcache_bytes();
+	void *addr = (void *)(start & ~(bytes - 1));
+	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+	unsigned long i;
-	asm volatile(PPC_PHWSYNC ::: "memory");
+	for (i = 0; i < size >> shift; i++, addr += bytes)
+		asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory");
}
static inline void clean_pmem_range(unsigned long start, unsigned long stop)
{
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		return clean_pmem_range_isa310(start, stop);
-	return flush_dcache_range(start, stop);
+	return __clean_pmem_range(start, stop);
}
static inline void flush_pmem_range(unsigned long start, unsigned long stop)
{
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		return flush_pmem_range_isa310(start, stop);
-	return flush_dcache_range(start, stop);
+	return __flush_pmem_range(start, stop);
}
/*
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -285,6 +285,18 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
	return 0;
}
+/*
+ * We have made sure the pmem writes are done such that before calling this
+ * all the caches are flushed/clean. We use dcbf/dcbfps to ensure this. Here
+ * we just need to add the necessary barrier to make sure the above flushes
+ * have updated persistent storage before any data access or data transfer
+ * caused by subsequent instructions is initiated.
+ */
+static int papr_scm_flush_sync(struct nd_region *nd_region, struct bio *bio)
+{
+	arch_pmem_flush_barrier();
+	return 0;
+}
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
@@ -340,6 +352,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
	ndr_desc.mapping = &mapping;
	ndr_desc.num_mappings = 1;
	ndr_desc.nd_set = &p->nd_set;
+	ndr_desc.flush = papr_scm_flush_sync;
	if (p->is_volatile)
		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
nvdimm expects the flush routines to just mark the cache clean. The barrier
that makes the store globally visible is done in nvdimm_flush(). Update the
papr_scm driver to use a simplified nvdimm_flush callback that does only the
required barrier.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/powerpc/lib/pmem.c                   | 34 +++++++++++++++++------
 arch/powerpc/platforms/pseries/papr_scm.c | 13 +++++++++
 2 files changed, 39 insertions(+), 8 deletions(-)
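
For context (not part of the patch itself): the callback added above works because
the libnvdimm core routes flush requests through the region's flush hook when a
driver installs one. The snippet below is an approximation of that dispatch in
drivers/nvdimm/region_devs.c, shown only to illustrate why papr_scm_flush_sync()
can be reduced to a barrier; by the time it runs, the dcbf/dcbfps loops in
arch/powerpc/lib/pmem.c have already cleaned the relevant cache lines.

/*
 * Approximate sketch of the libnvdimm core dispatch (see
 * drivers/nvdimm/region_devs.c); illustration only, not introduced here.
 */
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		/* no driver callback: fall back to the generic flush + barrier */
		rc = generic_nvdimm_flush(nd_region);
	else {
		/* papr_scm_flush_sync() lands here and only issues the barrier */
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}

This split keeps the per-cacheline maintenance in the arch helpers in
arch/powerpc/lib/pmem.c, while the heavier ordering barrier is paid once per
flush request in the driver callback.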