diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -70,6 +70,7 @@ struct dev_dax {
struct ida ida;
struct device dev;
struct dev_pagemap *pgmap;
+ bool memmap_on_memory;
int nr_range;
struct dev_dax_range {
unsigned long pgoff;
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
+#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
+#include <linux/memory.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
@@ -1269,6 +1271,43 @@ static ssize_t numa_node_show(struct device *dev,
}
static DEVICE_ATTR_RO(numa_node);
+static ssize_t memmap_on_memory_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ return sprintf(buf, "%d\n", dev_dax->memmap_on_memory);
+}
+
+static ssize_t memmap_on_memory_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ ssize_t rc;
+ bool val;
+
+ rc = kstrtobool(buf, &val);
+ if (rc)
+ return rc;
+
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return -ENXIO;
+ }
+
+ if (mhp_supports_memmap_on_memory(memory_block_size_bytes()))
+ dev_dax->memmap_on_memory = val;
+ else
+ rc = -ENXIO;
+
+ device_unlock(dax_region->dev);
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_RW(memmap_on_memory);
+
static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1295,6 +1334,7 @@ static struct attribute *dev_dax_attributes[] = {
&dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_numa_node.attr,
+ &dev_attr_memmap_on_memory.attr,
NULL,
};
@@ -1400,6 +1440,14 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
dev_dax->align = dax_region->align;
ida_init(&dev_dax->ida);
+ /*
+ * If supported by memory_hotplug, allow memmap_on_memory behavior by
+ * default. This can be overridden via sysfs before handing the memory
+ * over to kmem if desired.
+ */
+ if (mhp_supports_memmap_on_memory(memory_block_size_bytes()))
+ dev_dax->memmap_on_memory = true;
+
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
dev->bus = &dax_bus_type;
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -56,6 +56,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
unsigned long total_len = 0;
struct dax_kmem_data *data;
int i, rc, mapped = 0;
+ mhp_t mhp_flags;
int numa_node;
/*
@@ -136,12 +137,16 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
*/
res->flags = IORESOURCE_SYSTEM_RAM;
+ mhp_flags = MHP_NID_IS_MGID;
+ if (dev_dax->memmap_on_memory)
+ mhp_flags |= MHP_MEMMAP_ON_MEMORY;
+
/*
* Ensure that future kexec'd kernels will not treat
* this as RAM automatically.
*/
rc = add_memory_driver_managed(data->mgid, range.start,
- range_len(&range), kmem_name, MHP_NID_IS_MGID);
+ range_len(&range), kmem_name, mhp_flags);
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
Large amounts of memory managed by the kmem driver may come in via CXL, and it is often desirable to have the memmap for this memory on the new memory itself. Enroll kmem-managed memory for memmap_on_memory semantics as a default if other requirements for it are met. Add a sysfs override under the dax device to opt out of this behavior. Cc: Andrew Morton <akpm@linux-foundation.org> Cc: David Hildenbrand <david@redhat.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Jiang <dave.jiang@intel.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Huang Ying <ying.huang@intel.com> Signed-off-by: Vishal Verma <vishal.l.verma@intel.com> --- drivers/dax/dax-private.h | 1 + drivers/dax/bus.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/dax/kmem.c | 7 ++++++- 3 files changed, 55 insertions(+), 1 deletion(-)