@@ -221,6 +221,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
* * %-EINVAL - Reserved fields or invalid values were used.
* * %-ENOMEM - Input or output buffer wasn't sized properly.
* * %-EPERM - Attempted to use a protected command.
+ * * %-EBUSY - Kernel has claimed exclusive access to this opcode.
*
* The result of this command is a fully validated command in @out_cmd that is
* safe to send to the hardware.
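For context on the new return code, a hedged userspace sketch of how -EBUSY
surfaces through the CXL_MEM_SEND_COMMAND ioctl. The helper name and the
choice of SET_LSA are illustrative assumptions, not part of this patch; the
fd is an open /dev/cxl/memN character device and the SET_LSA payload
formatting is elided.

/* Illustrative only: exercises the -EBUSY path documented above. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>

static int try_set_lsa(int fd, void *buf, int len)
{
	struct cxl_send_command cmd = {
		.id = CXL_MEM_COMMAND_ID_SET_LSA,
		.in.size = len,		/* payload formatting elided */
		.in.payload = (uint64_t)(uintptr_t)buf,
	};

	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) < 0) {
		int err = errno;	/* save before fprintf() clobbers it */

		if (err == EBUSY)	/* opcode claimed by the kernel */
			fprintf(stderr, "SET_LSA is kernel-exclusive\n");
		return -err;
	}
	return cmd.retval;	/* device status, 0 on success */
}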
@@ -296,6 +297,10 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
if (!test_bit(info->id, cxlm->enabled_cmds))
return -ENOTTY;

+ /* Check that the command is not claimed for exclusive kernel use */
+ if (test_bit(info->id, cxlm->exclusive_cmds))
+ return -EBUSY;
+
/* Check the input buffer is the expected size */
if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
return -ENOMEM;
@@ -134,6 +134,38 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};

+/**
+ * set_exclusive_cxl_commands() - atomically disable user cxl commands
+ * @cxlm: cxl_mem instance to modify
+ * @cmds: bitmap of commands to mark exclusive
+ *
+ * Grab the cxl_memdev_rwsem in write mode to flush in-flight
+ * invocations of the ioctl path and then disable future execution of
+ * commands with the command ids set in @cmds.
+ */
+void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+{
+ down_write(&cxl_memdev_rwsem);
+ bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+ CXL_MEM_COMMAND_ID_MAX);
+ up_write(&cxl_memdev_rwsem);
+}
+EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands);
+
+/**
+ * clear_exclusive_cxl_commands() - atomically enable user cxl commands
+ * @cxlm: cxl_mem instance to modify
+ * @cmds: bitmap of commands to mark available for userspace
+ */
+void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+{
+ down_write(&cxl_memdev_rwsem);
+ bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+ CXL_MEM_COMMAND_ID_MAX);
+ up_write(&cxl_memdev_rwsem);
+}
+EXPORT_SYMBOL_GPL(clear_exclusive_cxl_commands);
+
static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
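The down_write() in set_exclusive_cxl_commands() only flushes in-flight
invocations because the ioctl path holds the same rwsem for read across
dispatch. A sketch of that existing read side, paraphrased from
cxl_memdev_ioctl() and not part of this diff:

/*
 * Paraphrase of the pre-existing read side: every ioctl invocation runs
 * under cxl_memdev_rwsem in read mode, so down_write() cannot proceed
 * until in-flight commands drain, and later invocations observe the
 * updated exclusive_cmds bitmap.
 */
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct cxl_memdev *cxlmd = file->private_data;
	int rc = -ENXIO;

	down_read(&cxl_memdev_rwsem);
	if (cxlmd->cxlm)		/* device may have been unbound */
		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
	up_read(&cxl_memdev_rwsem);

	return rc;
}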
@@ -101,6 +101,7 @@ struct cxl_mbox_cmd {
* @mbox_mutex: Mutex to synchronize mailbox access.
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
+ * @exclusive_cmds: Commands that are kernel-internal only
* @pmem_range: Active Persistent memory capacity configuration
* @ram_range: Active Volatile memory capacity configuration
* @total_bytes: sum of all possible capacities
@@ -127,6 +128,7 @@ struct cxl_mem {
struct mutex mbox_mutex; /* Protects device mailbox and firmware */
char firmware_version[0x10];
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
+ DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

struct range pmem_range;
struct range ram_range;
@@ -200,4 +202,6 @@ int cxl_mem_identify(struct cxl_mem *cxlm);
int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm);
int cxl_mem_create_range_info(struct cxl_mem *cxlm);
struct cxl_mem *cxl_mem_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
#endif /* __CXL_MEM_H__ */
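Taken together, the intended calling convention for these two exports is a
claim/release pair bracketing a kernel-internal user of the mailbox. A
hedged sketch of a hypothetical consumer (the my_* names are invented; the
real consumer is cxl_nvdimm_probe() below):

/* Hypothetical consumer: claim SET_LSA while this driver is bound. */
static DECLARE_BITMAP(my_cmds, CXL_MEM_COMMAND_ID_MAX);

static void my_release(void *cxlm)
{
	clear_exclusive_cxl_commands(cxlm, my_cmds);
}

static int my_claim(struct device *dev, struct cxl_mem *cxlm)
{
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, my_cmds);
	set_exclusive_cxl_commands(cxlm, my_cmds);

	/* release automatically on unbind, as cxl_nvdimm_probe() does */
	return devm_add_action_or_reset(dev, my_release, cxlm);
}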
@@ -16,6 +16,13 @@
*/
static struct workqueue_struct *cxl_pmem_wq;

+static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+
+static void clear_exclusive(void *cxlm)
+{
+ clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
+}
+
static void unregister_nvdimm(void *nvdimm)
{
nvdimm_delete(nvdimm);
@@ -39,25 +46,37 @@ static struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void)
static int cxl_nvdimm_probe(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_mem *cxlm = cxlmd->cxlm;
struct cxl_nvdimm_bridge *cxl_nvb;
unsigned long flags = 0;
struct nvdimm *nvdimm;
- int rc = -ENXIO;
+ int rc;

cxl_nvb = cxl_find_nvdimm_bridge();
if (!cxl_nvb)
return -ENXIO;

device_lock(&cxl_nvb->dev);
- if (!cxl_nvb->nvdimm_bus)
+ if (!cxl_nvb->nvdimm_bus) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ set_exclusive_cxl_commands(cxlm, exclusive_cmds);
+ rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
+ if (rc)
goto out;

set_bit(NDD_LABELING, &flags);
nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0,
NULL);
- if (!nvdimm)
+ if (!nvdimm) {
+ rc = -ENOMEM;
goto out;
+ }

+ dev_set_drvdata(dev, nvdimm);
rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
device_unlock(&cxl_nvb->dev);
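One subtlety in the probe flow above: the two devm actions unwind in
reverse (LIFO) order on unbind, summarized in the editorial comment below.

/*
 * Editorial note, not part of the patch: devm actions run LIFO on
 * unbind, so unregister_nvdimm() (registered last) executes before
 * clear_exclusive(). The claimed opcodes therefore stay kernel-owned
 * until the nvdimm itself is gone, closing any userspace race with a
 * live label area.
 */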
@@ -194,6 +213,10 @@ static __init int cxl_pmem_init(void)
{
int rc;

+ set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds);
+ set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
+ set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
+
cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
if (!cxl_pmem_wq)
return -ENXIO;
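The rationale for this particular set, as best it can be inferred from the
patch: these are the opcodes that mutate state the LIBNVDIMM stack owns
while a cxl_nvdimm is bound. SET_LSA rewrites the label storage area,
SET_SHUTDOWN_STATE is managed by the nvdimm shutdown path, and
SET_PARTITION_INFO changes the volatile/persistent split that the labels
describe, so all three are withheld from userspace for the lifetime of the
binding.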