Message ID | 20230922130548.3476202-1-jtp.park@samsung.com |
---|---|
State | New, archived |
Series | cxl: Fix background operation handling |
On 9/22/23 06:05, Jeongtae Park wrote:
> This patch generalizes the sanitize command handling so that other
> background-capable commands can also be handled asynchronously. Much of
> the implementation follows the approach of the existing sanitize command
> handling, with the addition of 'bg_mode' to cxl_mbox_cmd so that callers
> can explicitly choose how each command is handled. Even when asynchronous
> processing via irq is requested, it falls back to polling if the device
> does not support mailbox interrupts. A new cxl_bgcmd_state is added by
> moving the necessary data structures out of cxl_security_state.
>
> Signed-off-by: Jeongtae Park <jtp.park@samsung.com>
> Signed-off-by: Hojin Nam <hj96.nam@samsung.com>
> ---
>  drivers/cxl/core/memdev.c |  8 ++---
>  drivers/cxl/cxlmem.h      | 33 +++++++++++++++++---
>  drivers/cxl/pci.c         | 65 +++++++++++++++++++--------------------
>  3 files changed, 64 insertions(+), 42 deletions(-)
>
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index f99e7ec3cc40..8d1692231767 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -537,13 +537,13 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
>  }
>  EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
>
> -static void cxl_memdev_security_shutdown(struct device *dev)
> +static void cxl_memdev_bgcmd_shutdown(struct device *dev)
>  {
>          struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
>          struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
>
> -        if (mds->security.poll)
> -                cancel_delayed_work_sync(&mds->security.poll_dwork);
> +        if (mds->bgcmd.poll)
> +                cancel_delayed_work_sync(&mds->bgcmd.poll_dwork);
>  }
>
>  static void cxl_memdev_shutdown(struct device *dev)
> @@ -551,7 +551,7 @@ static void cxl_memdev_shutdown(struct device *dev)
>          struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
>
>          down_write(&cxl_memdev_rwsem);
> -        cxl_memdev_security_shutdown(dev);
> +        cxl_memdev_bgcmd_shutdown(dev);
>          cxlmd->cxlds = NULL;
>          up_write(&cxl_memdev_rwsem);
>  }
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index 79e99c873ca2..879903a8c822 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -100,6 +100,13 @@ static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
>          return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
>  }
>
> +enum cxl_bg_cmd_mode {
> +#define CXL_BG_CMD_ASYNC BIT_MASK(0)
> +        CXL_BG_CMD_SYNC = 0x00,
> +        CXL_BG_CMD_ASYNC_POLL = 0x01,
> +        CXL_BG_CMD_ASYNC_INT = 0x03
> +};
> +
>  /**
>   * struct cxl_mbox_cmd - A command to be submitted to hardware.
>   * @opcode: (input) The command set and command submitted to hardware.
> @@ -113,6 +120,8 @@ static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
>   *            variable sized output commands, it tells the exact number of bytes
>   *            written.
>   * @min_out: (input) internal command output payload size validation
> + * @bg_mode: (input) The processing mode of a background command. It will be
> + *           valid only when the background command started successfully.
>   * @poll_count: (input) Number of timeouts to attempt.
>   * @poll_interval_ms: (input) Time between mailbox background command polling
>   *                    interval timeouts.
> @@ -131,6 +140,7 @@ struct cxl_mbox_cmd {
>          size_t size_in;
>          size_t size_out;
>          size_t min_out;
> +        int bg_mode;
>          int poll_count;
>          int poll_interval_ms;
>          u16 return_code;
> @@ -346,17 +356,31 @@ struct cxl_fw_state {
>   * struct cxl_security_state - Device security state
>   *
>   * @state: state of last security operation
> - * @poll: polling for sanitization is enabled, device has no mbox irq support
> - * @poll_tmo_secs: polling timeout
> - * @poll_dwork: polling work item
>   * @sanitize_node: sanitation sysfs file to notify
>   */
>  struct cxl_security_state {
>          unsigned long state;
> +        struct kernfs_node *sanitize_node;

The sanitize_node is kept in cxl_security_state rather than moved into the
new cxl_bgcmd_state, but it no longer appears to be used anywhere. Does it
just need to be removed?

DJ

> +};
> +
> +/**
> + * struct cxl_bgcmd_state - Device background command state
> + *
> + * @poll: polling for background command completion, device has no mbox
> + *        irq support
> + * @poll_tmo_secs: polling timeout
> + * @poll_dwork: polling work item
> + * @opcode: The background command submitted to hardware
> + * @mode: The background command submitted to hardware
> + * @complete_node: background command completion file to notify
> + */
> +struct cxl_bgcmd_state {
>          bool poll;
>          int poll_tmo_secs;
>          struct delayed_work poll_dwork;
> -        struct kernfs_node *sanitize_node;
> +        u16 opcode;
> +        int mode;
> +        struct kernfs_node *complete_node;
>  };
>
>  /*
> @@ -460,6 +484,7 @@ struct cxl_memdev_state {
>          struct cxl_poison_state poison;
>          struct cxl_security_state security;
>          struct cxl_fw_state fw;
> +        struct cxl_bgcmd_state bgcmd;
>
>          struct rcuwait mbox_wait;
>          int (*mbox_send)(struct cxl_memdev_state *mds,
> diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
> index 1cb1494c28fe..570fca24ab12 100644
> --- a/drivers/cxl/pci.c
> +++ b/drivers/cxl/pci.c
> @@ -116,8 +116,6 @@ static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
>
>  static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
>  {
> -        u64 reg;
> -        u16 opcode;
>          struct cxl_dev_id *dev_id = id;
>          struct cxl_dev_state *cxlds = dev_id->cxlds;
>          struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
> @@ -125,13 +123,12 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
>          if (!cxl_mbox_background_complete(cxlds))
>                  return IRQ_NONE;
>
> -        reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
> -        opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
> -        if (opcode == CXL_MBOX_OP_SANITIZE) {
> -                if (mds->security.sanitize_node)
> -                        sysfs_notify_dirent(mds->security.sanitize_node);
> +        if (mds->bgcmd.mode & CXL_BG_CMD_ASYNC) {
> +                if (mds->bgcmd.complete_node)
> +                        sysfs_notify_dirent(mds->bgcmd.complete_node);
>
> -                dev_dbg(cxlds->dev, "Sanitization operation ended\n");
> +                dev_dbg(cxlds->dev, "Mailbox background operation (0x%04x) ended\n",
> +                        mds->bgcmd.opcode);
>          } else {
>                  /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
>                  rcuwait_wake_up(&mds->mbox_wait);
> @@ -141,28 +138,29 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
>  }
>
>  /*
> - * Sanitization operation polling mode.
> + * Background operation polling mode.
>   */
> -static void cxl_mbox_sanitize_work(struct work_struct *work)
> +static void cxl_mbox_bg_work(struct work_struct *work)
>  {
>          struct cxl_memdev_state *mds =
> -                container_of(work, typeof(*mds), security.poll_dwork.work);
> +                container_of(work, typeof(*mds), bgcmd.poll_dwork.work);
>          struct cxl_dev_state *cxlds = &mds->cxlds;
>
>          mutex_lock(&mds->mbox_mutex);
>          if (cxl_mbox_background_complete(cxlds)) {
> -                mds->security.poll_tmo_secs = 0;
> +                mds->bgcmd.poll_tmo_secs = 0;
>                  put_device(cxlds->dev);
>
> -                if (mds->security.sanitize_node)
> -                        sysfs_notify_dirent(mds->security.sanitize_node);
> +                if (mds->bgcmd.complete_node)
> +                        sysfs_notify_dirent(mds->bgcmd.complete_node);
>
> -                dev_dbg(cxlds->dev, "Sanitization operation ended\n");
> +                dev_dbg(cxlds->dev, "Mailbox background operation (0x%04x) ended\n",
> +                        mds->bgcmd.opcode);
>          } else {
> -                int timeout = mds->security.poll_tmo_secs + 10;
> +                int timeout = mds->bgcmd.poll_tmo_secs + 10;
>
> -                mds->security.poll_tmo_secs = min(15 * 60, timeout);
> -                queue_delayed_work(system_wq, &mds->security.poll_dwork,
> +                mds->bgcmd.poll_tmo_secs = min(15 * 60, timeout);
> +                queue_delayed_work(system_wq, &mds->bgcmd.poll_dwork,
>                                     timeout * HZ);
>          }
>          mutex_unlock(&mds->mbox_mutex);
> @@ -234,7 +232,8 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
>           * not be in sync. Ensure no new command comes in until so. Keep the
>           * hardware semantics and only allow device health status.
>           */
> -        if (mds->security.poll_tmo_secs > 0) {
> +        if (mds->bgcmd.poll_tmo_secs > 0 &&
> +            mds->bgcmd.opcode == CXL_MBOX_OP_SANITIZE) {
>                  if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
>                          return -EBUSY;
>          }
> @@ -289,31 +288,29 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
>                  u64 bg_status_reg;
>                  int i, timeout;
>
> -                /*
> -                 * Sanitization is a special case which monopolizes the device
> -                 * and cannot be timesliced. Handle asynchronously instead,
> -                 * and allow userspace to poll(2) for completion.
> -                 */
> -                if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
> -                        if (mds->security.poll) {
> +                dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
> +                        mbox_cmd->opcode);
> +
> +                mds->bgcmd.opcode = mbox_cmd->opcode;
> +                mds->bgcmd.mode = mbox_cmd->bg_mode;
> +
> +                if (mbox_cmd->bg_mode & CXL_BG_CMD_ASYNC) {
> +                        if (mbox_cmd->bg_mode == CXL_BG_CMD_ASYNC_POLL ||
> +                            mds->bgcmd.poll) {
>                                  /* hold the device throughout */
>                                  get_device(cxlds->dev);
>
>                                  /* give first timeout a second */
>                                  timeout = 1;
> -                                mds->security.poll_tmo_secs = timeout;
> +                                mds->bgcmd.poll_tmo_secs = timeout;
>                                  queue_delayed_work(system_wq,
> -                                                   &mds->security.poll_dwork,
> +                                                   &mds->bgcmd.poll_dwork,
>                                                     timeout * HZ);
>                          }
>
> -                        dev_dbg(dev, "Sanitization operation started\n");
>                          goto success;
>                  }
>
> -                dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
> -                        mbox_cmd->opcode);
> -
>                  timeout = mbox_cmd->poll_interval_ms;
>                  for (i = 0; i < mbox_cmd->poll_count; i++) {
>                          if (rcuwait_wait_event_timeout(&mds->mbox_wait,
> @@ -460,8 +457,8 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
>          }
>
>  mbox_poll:
> -        mds->security.poll = true;
> -        INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
> +        mds->bgcmd.poll = true;
> +        INIT_DELAYED_WORK(&mds->bgcmd.poll_dwork, cxl_mbox_bg_work);
>
>          dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
>          return 0;
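The 'bg_mode' mechanism above boils down to callers tagging a mailbox command
with the completion mode they want. A minimal caller sketch follows; it is
illustrative only and not part of this patch, assumes the existing
cxl_internal_send_cmd() helper, and uses a made-up function name:

    /*
     * Hypothetical caller: request asynchronous handling of a
     * background-capable command. With CXL_BG_CMD_ASYNC_INT the completion
     * is signaled through the mailbox irq handler when the device supports
     * it; otherwise __cxl_pci_mbox_send_cmd() queues the polling worker
     * (mds->bgcmd.poll_dwork) as a fallback.
     */
    static int example_start_async_sanitize(struct cxl_memdev_state *mds)
    {
            struct cxl_mbox_cmd cmd = {
                    .opcode = CXL_MBOX_OP_SANITIZE,
                    .bg_mode = CXL_BG_CMD_ASYNC_INT,
            };

            return cxl_internal_send_cmd(mds, &cmd);
    }

Leaving bg_mode at its default of CXL_BG_CMD_SYNC (0) keeps the existing
synchronous behavior, where __cxl_pci_mbox_send_cmd() waits for completion
according to poll_count and poll_interval_ms.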