@@ -259,34 +259,29 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
return -ENOMEM;
dev = idxd_cdev->dev;
+ device_initialize(dev);
dev->parent = &idxd->pdev->dev;
- dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
- idxd->id, wq->id);
dev->bus = idxd_get_bus_type(idxd);
+ dev->type = &idxd_cdev_device_type;
+ rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+ idxd->id, wq->id);
+ if (rc < 0)
+ goto dev_set_err;
cdev_ctx = &ictx[wq->idxd->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
rc = minor;
- kfree(dev);
- goto ida_err;
+ goto dev_set_err;
}
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
- dev->type = &idxd_cdev_device_type;
- rc = device_register(dev);
- if (rc < 0) {
- dev_err(&idxd->pdev->dev, "device register failed\n");
- goto dev_reg_err;
- }
idxd_cdev->minor = minor;
return 0;
- dev_reg_err:
- ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+ dev_set_err:
put_device(dev);
- ida_err:
idxd_cdev->dev = NULL;
return rc;
}
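
Note: the setup above follows the split device_initialize()/device_add() pattern. Once device_initialize() has run, the struct device holds a reference, so every error path must unwind with put_device() (which invokes the device_type's release()) rather than kfree(). A minimal sketch of the idiom, using a hypothetical struct foo:

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));	/* single point of free */
}

static int foo_setup(struct foo *foo)
{
	int rc;

	device_initialize(&foo->dev);		/* refcount 1; release() owns foo now */
	foo->dev.release = foo_release;

	rc = dev_set_name(&foo->dev, "foo%d", foo->id);
	if (rc < 0)
		goto err;			/* never kfree() after initialize */

	rc = device_add(&foo->dev);
	if (rc < 0)
		goto err;
	return 0;

 err:
	put_device(&foo->dev);			/* drops the ref; foo_release() runs */
	return rc;
}
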
@@ -296,18 +291,24 @@ static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
{
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
struct idxd_cdev_context *cdev_ctx;
+ struct device *dev;
+ int minor;
cdev_ctx = &ictx[wq->idxd->type];
- if (cdev_state == CDEV_NORMAL)
- cdev_del(&idxd_cdev->cdev);
- device_unregister(idxd_cdev->dev);
+ minor = idxd_cdev->minor;
+ dev = idxd_cdev->dev;
+ if (cdev_state == CDEV_NORMAL) {
+ cdev_device_del(&idxd_cdev->cdev, dev);
+ put_device(dev);
+ } else {
+ device_unregister(dev);
+ }
+
/*
* The device_type->release() will be called on the device and free
* the allocated struct device. We can just forget it.
*/
- ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
- idxd_cdev->dev = NULL;
- idxd_cdev->minor = -1;
+ ida_simple_remove(&cdev_ctx->minor_ida, minor);
}
int idxd_wq_add_cdev(struct idxd_wq *wq)
@@ -323,15 +324,14 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
dev = idxd_cdev->dev;
cdev_init(cdev, &idxd_cdev_fops);
- cdev_set_parent(cdev, &dev->kobj);
- rc = cdev_add(cdev, dev->devt, 1);
+ init_waitqueue_head(&idxd_cdev->err_queue);
+ rc = cdev_device_add(cdev, dev);
if (rc) {
dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
return rc;
}
- init_waitqueue_head(&idxd_cdev->err_queue);
return 0;
}
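
cdev_device_add() replaces the open-coded cdev_set_parent() + cdev_add() + device_register() sequence: it parents the cdev's kobject to the device internally, adds the cdev using dev->devt, then adds the device. Teardown pairs with cdev_device_del(). A hedged sketch with a hypothetical mydev:

cdev_init(&mydev->cdev, &my_fops);
rc = cdev_device_add(&mydev->cdev, &mydev->dev);	/* cdev_add() + device_add() */
if (rc)
	put_device(&mydev->dev);	/* only initialized, never added: drop the ref */
/* ... later, on teardown: */
cdev_device_del(&mydev->cdev, &mydev->dev);	/* device_del() + cdev_del() */
put_device(&mydev->dev);			/* final ref: release() frees mydev */
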
@@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
- struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
+ struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
pci_msi_mask_irq(data);
}
@@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
- struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
+ struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
pci_msi_unmask_irq(data);
}
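
With pci_alloc_irq_vectors() there is no driver-owned struct msix_entry table anymore; the Linux IRQ number for a given vector index comes from pci_irq_vector(). A minimal sketch:

/* valid only after pci_alloc_irq_vectors(pdev, ...) has succeeded */
int irq = pci_irq_vector(pdev, vec_id);		/* vector index -> Linux IRQ */
struct irq_data *data = irq_get_irq_data(irq);

The setup path caches this value in idxd_irq_entry->vector so the mask/unmask helpers need no PCI lookup.
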
@@ -515,7 +515,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
lockdep_assert_held(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
@@ -624,7 +624,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
idxd_group_config_write(group);
}
@@ -696,7 +696,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
int i, rc;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
rc = idxd_wq_config_write(wq);
if (rc < 0)
@@ -712,7 +712,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
/* TC-A 0 and TC-B 1 should be defaults */
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
if (group->tc_a == -1)
group->tc_a = group->grpcfg.flags.tc_a = 0;
@@ -739,12 +739,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
struct idxd_group *group;
for (i = 0; i < idxd->max_groups; i++) {
- group = &idxd->groups[i];
+ group = idxd->groups[i];
group->grpcfg.engines = 0;
}
for (i = 0; i < idxd->max_engines; i++) {
- eng = &idxd->engines[i];
+ eng = idxd->engines[i];
group = eng->group;
if (!group)
@@ -768,13 +768,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
for (i = 0; i < idxd->max_groups; i++) {
- group = &idxd->groups[i];
+ group = idxd->groups[i];
for (j = 0; j < 4; j++)
group->grpcfg.wqs[j] = 0;
}
for (i = 0; i < idxd->max_wqs; i++) {
- wq = &idxd->wqs[i];
+ wq = idxd->wqs[i];
group = wq->group;
if (!wq->group)
@@ -156,11 +156,15 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static void idxd_dma_release(struct dma_device *device)
{
+ struct idxd_device *idxd = container_of(device, struct idxd_device, dma_dev);
+
+ put_device(&idxd->conf_dev);
}
int idxd_register_dma_device(struct idxd_device *idxd)
{
struct dma_device *dma = &idxd->dma_dev;
+ int rc;
INIT_LIST_HEAD(&dma->channels);
dma->dev = &idxd->pdev->dev;
@@ -178,8 +182,15 @@ int idxd_register_dma_device(struct idxd_device *idxd)
dma->device_issue_pending = idxd_dma_issue_pending;
dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+ get_device(&idxd->conf_dev);
- return dma_async_device_register(&idxd->dma_dev);
+ rc = dma_async_device_register(&idxd->dma_dev);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return rc;
+ }
+
+ return 0;
}
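
struct dma_device carries no refcount of its own, so the registration path pins the owning conf_dev: get_device() before dma_async_device_register(), put_device() either on failure or later from the dma_device release callback (idxd_dma_release() above). A hedged sketch of the pairing:

get_device(parent);			/* pin for as long as dmaengine holds dma_dev */
rc = dma_async_device_register(dma);
if (rc < 0) {
	put_device(parent);		/* registration failed: drop the pin now */
	return rc;
}
/* on success the pin is dropped in the dma_device release callback */
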
void idxd_unregister_dma_device(struct idxd_device *idxd)
@@ -33,6 +33,7 @@ struct idxd_device_driver {
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
+ int vector;
struct llist_head pending_llist;
struct list_head work_list;
/*
@@ -178,9 +179,9 @@ struct idxd_device {
spinlock_t dev_lock; /* spinlock for device */
struct completion *cmd_done;
- struct idxd_group *groups;
- struct idxd_wq *wqs;
- struct idxd_engine *engines;
+ struct idxd_group **groups;
+ struct idxd_wq **wqs;
+ struct idxd_engine **engines;
struct iommu_sva *sva;
unsigned int pasid;
@@ -206,7 +207,6 @@ struct idxd_device {
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
- struct msix_entry *msix_entries;
int num_wq_irqs;
struct idxd_irq_entry *irq_entries;
@@ -242,6 +242,41 @@ extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
+extern struct device_type dsa_device_type;
+extern struct device_type iax_device_type;
+extern struct device_type idxd_wq_device_type;
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+ return (dev->type == &dsa_device_type);
+}
+
+static inline bool is_iax_dev(struct device *dev)
+{
+ return (dev->type == &iax_device_type);
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+ return is_dsa_dev(dev) || is_iax_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+ return (dev->type == &idxd_wq_device_type);
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+ if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
+ return true;
+ return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_USER;
+}
static inline bool wq_dedicated(struct idxd_wq *wq)
{
@@ -61,7 +61,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
- struct msix_entry *msix;
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
@@ -70,23 +69,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
dev_err(dev, "Not MSI-X interrupt capable.\n");
- goto err_no_irq;
+ return -ENXIO;
}
- idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
- msixcnt, GFP_KERNEL);
- if (!idxd->msix_entries) {
- rc = -ENOMEM;
- goto err_no_irq;
- }
-
- for (i = 0; i < msixcnt; i++)
- idxd->msix_entries[i].entry = i;
-
- rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
- if (rc) {
- dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
- goto err_no_irq;
+ rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
+ if (rc != msixcnt) {
+ dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
+ return -ENOSPC;
}
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
@@ -94,53 +83,48 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
* We implement 1 completion list per MSI-X entry except for
* entry 0, which is for errors and others.
*/
- idxd->irq_entries = devm_kcalloc(dev, msixcnt,
- sizeof(struct idxd_irq_entry),
- GFP_KERNEL);
+ idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
+ GFP_KERNEL, dev_to_node(dev));
if (!idxd->irq_entries) {
rc = -ENOMEM;
- goto err_no_irq;
+ goto err_ie_alloc;
}
for (i = 0; i < msixcnt; i++) {
idxd->irq_entries[i].id = i;
idxd->irq_entries[i].idxd = idxd;
+ idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
spin_lock_init(&idxd->irq_entries[i].list_lock);
}
- msix = &idxd->msix_entries[0];
irq_entry = &idxd->irq_entries[0];
- rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
- idxd_misc_thread, 0, "idxd-misc",
- irq_entry);
+ rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
+ 0, "idxd-misc", irq_entry);
if (rc < 0) {
dev_err(dev, "Failed to allocate misc interrupt.\n");
- goto err_no_irq;
+ goto err_misc_irq;
}
dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
- msix->vector);
+ irq_entry->vector);
/* first MSI-X entry is not for wq interrupts */
idxd->num_wq_irqs = msixcnt - 1;
for (i = 1; i < msixcnt; i++) {
- msix = &idxd->msix_entries[i];
irq_entry = &idxd->irq_entries[i];
init_llist_head(&idxd->irq_entries[i].pending_llist);
INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
- rc = devm_request_threaded_irq(dev, msix->vector,
- idxd_irq_handler,
- idxd_wq_thread, 0,
- "idxd-portal", irq_entry);
+ rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
+ idxd_wq_thread, 0, "idxd-portal", irq_entry);
if (rc < 0) {
dev_err(dev, "Failed to allocate irq %d.\n",
- msix->vector);
- goto err_no_irq;
+ irq_entry->vector);
+ goto err_wq_irqs;
}
dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
- i, msix->vector);
+ i, irq_entry->vector);
}
idxd_unmask_error_interrupts(idxd);
@@ -154,44 +138,137 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
return 0;
- err_no_irq:
- /* Disable error interrupt generation */
- idxd_mask_error_interrupts(idxd);
- pci_disable_msix(pdev);
+ err_wq_irqs:
+ while (--i >= 0) {
+ irq_entry = &idxd->irq_entries[i];
+ free_irq(irq_entry->vector, irq_entry);
+ }
+ err_misc_irq:
+ kfree(idxd->irq_entries);
+ err_ie_alloc:
+ pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
}
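
The error labels above unwind in reverse order of acquisition, and the loop form matters: while (--i >= 0) releases every slot below the one that failed, including index 0 (the misc interrupt), and never touches the slot whose request failed. The generic shape, sketched with hypothetical acquire()/release() helpers:

for (i = 0; i < n; i++) {
	rc = acquire(i);
	if (rc)
		goto unwind;
}
return 0;

unwind:
	while (--i >= 0)	/* i-1 .. 0; the failed slot was never acquired */
		release(i);
	return rc;
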
-static int idxd_setup_internals(struct idxd_device *idxd)
+static int idxd_allocate_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int i;
+ struct idxd_wq *wq;
+ int i, rc;
- init_waitqueue_head(&idxd->cmd_waitq);
- idxd->groups = devm_kcalloc(dev, idxd->max_groups,
- sizeof(struct idxd_group), GFP_KERNEL);
- if (!idxd->groups)
+ idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->wqs)
return -ENOMEM;
- for (i = 0; i < idxd->max_groups; i++) {
- idxd->groups[i].idxd = idxd;
- idxd->groups[i].id = i;
- idxd->groups[i].tc_a = -1;
- idxd->groups[i].tc_b = -1;
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+ if (!wq) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ idxd->wqs[i] = wq;
}
- idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
- GFP_KERNEL);
- if (!idxd->wqs)
- return -ENOMEM;
+ return 0;
+
+ err:
+ while (--i >= 0)
+ kfree(idxd->wqs[i]);
+ kfree(idxd->wqs);
+ idxd->wqs = NULL;
+ return rc;
+}
- idxd->engines = devm_kcalloc(dev, idxd->max_engines,
- sizeof(struct idxd_engine), GFP_KERNEL);
+static int idxd_allocate_engines(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
+ GFP_KERNEL, dev_to_node(dev));
if (!idxd->engines)
return -ENOMEM;
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
+ if (!engine) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ idxd->engines[i] = engine;
+ }
+
+ return 0;
+
+ err:
+ while (--i >= 0)
+ kfree(idxd->engines[i]);
+ kfree(idxd->engines);
+ idxd->engines = NULL;
+ return rc;
+}
+
+static int idxd_allocate_groups(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ struct idxd_group *group;
+ int i, rc;
+
+ idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
+ if (!group) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ idxd->groups[i] = group;
+ }
+
+ return 0;
+
+ err:
+ while (--i >= 0)
+ kfree(idxd->groups[i]);
+ kfree(idxd->groups);
+ idxd->groups = NULL;
+ return rc;
+}
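
All three allocators use kcalloc_node()/kzalloc_node() keyed by dev_to_node(), so per-device state lands on the device's local NUMA node instead of wherever the probing CPU happens to run. Sketch:

int node = dev_to_node(&pdev->dev);	/* NUMA node the PCI device hangs off */
struct idxd_wq **wqs = kcalloc_node(n, sizeof(*wqs), GFP_KERNEL, node);
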
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ init_waitqueue_head(&idxd->cmd_waitq);
+ rc = idxd_allocate_groups(idxd);
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = idxd->groups[i];
+
+ group->idxd = idxd;
+ group->id = i;
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
+
+ rc = idxd_allocate_wqs(idxd);
+ if (rc < 0)
+ return rc;
+
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
wq->id = i;
wq->idxd = idxd;
@@ -199,14 +276,20 @@ static int idxd_setup_internals(struct idxd_device *idxd)
wq->idxd_cdev.minor = -1;
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
- wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
+ wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg)
return -ENOMEM;
}
+ rc = idxd_allocate_engines(idxd);
+ if (rc < 0)
+ return rc;
+
for (i = 0; i < idxd->max_engines; i++) {
- idxd->engines[i].idxd = idxd;
- idxd->engines[i].id = i;
+ struct idxd_engine *engine = idxd->engines[i];
+
+ engine->idxd = idxd;
+ engine->id = i;
}
idxd->wq = create_workqueue(dev_name(dev));
@@ -288,7 +371,7 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
- idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+ idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
if (!idxd)
return NULL;
@@ -398,6 +481,33 @@ static void idxd_type_init(struct idxd_device *idxd)
idxd->compl_size = sizeof(struct iax_completion_record);
}
+static void idxd_free(struct idxd_device *idxd)
+{
+ int i;
+
+ if (idxd->wqs) {
+ for (i = 0; i < idxd->max_wqs; i++) {
+ kfree(idxd->wqs[i]->wqcfg);
+ kfree(idxd->wqs[i]);
+ }
+ kfree(idxd->wqs);
+ }
+
+ if (idxd->engines) {
+ for (i = 0; i < idxd->max_engines; i++)
+ kfree(idxd->engines[i]);
+ kfree(idxd->engines);
+ }
+
+ if (idxd->groups) {
+ for (i = 0; i < idxd->max_groups; i++)
+ kfree(idxd->groups[i]);
+ kfree(idxd->groups);
+ }
+
+ kfree(idxd);
+}
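
idxd_free() is only safe while the driver still owns the memory outright, i.e. on probe error paths that run before the conf_dev has been registered; once registration succeeds, the release() callbacks take over and the only legal way to free is dropping references. Sketch of the handoff, assuming that ordering:

rc = idxd_probe(idxd);
if (rc) {
	idxd_free(idxd);	/* not yet registered: free by hand */
	return rc;
}
/* registered from here on: teardown must go through put_device() */
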
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -415,21 +525,23 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "Mapping BARs\n");
idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
- if (!idxd->reg_base)
- return -ENOMEM;
+ if (!idxd->reg_base) {
+ rc = -ENOMEM;
+ goto err;
+ }
dev_dbg(dev, "Set DMA masks\n");
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
- return rc;
+ goto err;
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
- return rc;
+ goto err;
idxd_set_type(idxd);
@@ -443,13 +555,15 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = idxd_probe(idxd);
if (rc) {
dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto err;
}
rc = idxd_setup_sysfs(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto err;
}
idxd->state = IDXD_DEV_CONF_READY;
@@ -458,6 +572,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idxd->hw.version);
return 0;
+
+ err:
+ idxd_free(idxd);
+ return rc;
}
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
@@ -503,28 +621,33 @@ static void idxd_shutdown(struct pci_dev *pdev)
for (i = 0; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
- synchronize_irq(idxd->msix_entries[i].vector);
+ free_irq(irq_entry->vector, irq_entry);
if (i == 0)
continue;
idxd_flush_pending_llist(irq_entry);
idxd_flush_work_list(irq_entry);
}
+ pci_free_irq_vectors(pdev);
destroy_workqueue(idxd->wq);
}
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
+ int id = idxd->id;
+ enum idxd_type type = idxd->type;
dev_dbg(&pdev->dev, "%s called\n", __func__);
- idxd_cleanup_sysfs(idxd);
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
+ idxd_cleanup_sysfs(idxd);
mutex_lock(&idxd_idr_lock);
- idr_remove(&idxd_idrs[idxd->type], idxd->id);
+ idr_remove(&idxd_idrs[type], id);
mutex_unlock(&idxd_idr_lock);
+ /* Drop the last conf_dev reference; its release() frees the idxd context */
+ put_device(&idxd->conf_dev);
}
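
idxd_remove() copies id and type to locals up front so the idr removal does not depend on the idxd context staying alive through the teardown calls. The pattern:

int id = idxd->id;			/* cache: idxd may be gone after the put */
enum idxd_type type = idxd->type;
/* ... teardown ... */
idr_remove(&idxd_idrs[type], id);	/* uses only the cached copies */
put_device(&idxd->conf_dev);		/* last ref: release() frees idxd */
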
static struct pci_driver idxd_pci_driver = {
@@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
goto out;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_ENABLED) {
rc = idxd_wq_enable(wq);
@@ -130,7 +130,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;
- struct idxd_wq *wq = &idxd->wqs[id];
+ struct idxd_wq *wq = idxd->wqs[id];
if (wq->type == IDXD_WQT_USER)
wake_up_interruptible(&wq->idxd_cdev.err_queue);
@@ -138,7 +138,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
int i;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->type == IDXD_WQT_USER)
wake_up_interruptible(&wq->idxd_cdev.err_queue);
@@ -10,75 +10,13 @@
#include "registers.h"
#include "idxd.h"
+
static char *idxd_wq_type_names[] = {
[IDXD_WQT_NONE] = "none",
[IDXD_WQT_KERNEL] = "kernel",
[IDXD_WQT_USER] = "user",
};
-static void idxd_conf_device_release(struct device *dev)
-{
- dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
-}
-
-static struct device_type idxd_group_device_type = {
- .name = "group",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type idxd_wq_device_type = {
- .name = "wq",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type idxd_engine_device_type = {
- .name = "engine",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type dsa_device_type = {
- .name = "dsa",
- .release = idxd_conf_device_release,
-};
-
-static struct device_type iax_device_type = {
- .name = "iax",
- .release = idxd_conf_device_release,
-};
-
-static inline bool is_dsa_dev(struct device *dev)
-{
- return dev ? dev->type == &dsa_device_type : false;
-}
-
-static inline bool is_iax_dev(struct device *dev)
-{
- return dev ? dev->type == &iax_device_type : false;
-}
-
-static inline bool is_idxd_dev(struct device *dev)
-{
- return is_dsa_dev(dev) || is_iax_dev(dev);
-}
-
-static inline bool is_idxd_wq_dev(struct device *dev)
-{
- return dev ? dev->type == &idxd_wq_device_type : false;
-}
-
-static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
-{
- if (wq->type == IDXD_WQT_KERNEL &&
- strcmp(wq->name, "dmaengine") == 0)
- return true;
- return false;
-}
-
-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
-{
- return wq->type == IDXD_WQT_USER;
-}
-
static int idxd_config_bus_match(struct device *dev,
struct device_driver *drv)
{
@@ -327,7 +265,7 @@ static int idxd_config_bus_remove(struct device *dev)
dev_dbg(dev, "%s removing dev %s\n", __func__,
dev_name(&idxd->conf_dev));
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (wq->state == IDXD_WQ_DISABLED)
continue;
@@ -339,7 +277,7 @@ static int idxd_config_bus_remove(struct device *dev)
idxd_unregister_dma_device(idxd);
rc = idxd_device_disable(idxd);
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
mutex_lock(&wq->wq_lock);
idxd_wq_disable_cleanup(wq);
@@ -493,7 +431,7 @@ static ssize_t engine_group_id_store(struct device *dev,
if (prevg)
prevg->num_engines--;
- engine->group = &idxd->groups[id];
+ engine->group = idxd->groups[id];
engine->group->num_engines++;
return count;
@@ -517,6 +455,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
NULL,
};
+static void idxd_conf_engine_release(struct device *dev)
+{
+ struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
+
+ kfree(engine);
+}
+
+static struct device_type idxd_engine_device_type = {
+ .name = "engine",
+ .release = idxd_conf_engine_release,
+ .groups = idxd_engine_attribute_groups,
+};
+
/* Group attributes */
static void idxd_set_free_tokens(struct idxd_device *idxd)
@@ -524,7 +475,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
int i, tokens;
for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
- struct idxd_group *g = &idxd->groups[i];
+ struct idxd_group *g = idxd->groups[i];
tokens += g->tokens_reserved;
}
@@ -679,7 +630,7 @@ static ssize_t group_engines_show(struct device *dev,
struct idxd_device *idxd = group->idxd;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
if (!engine->group)
continue;
@@ -708,7 +659,7 @@ static ssize_t group_work_queues_show(struct device *dev,
struct idxd_device *idxd = group->idxd;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
if (!wq->group)
continue;
@@ -829,6 +780,19 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
NULL,
};
+static void idxd_conf_group_release(struct device *dev)
+{
+ struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
+
+ kfree(group);
+}
+
+static struct device_type idxd_group_device_type = {
+ .name = "group",
+ .release = idxd_conf_group_release,
+ .groups = idxd_group_attribute_groups,
+};
+
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -901,7 +865,7 @@ static ssize_t wq_group_id_store(struct device *dev,
return count;
}
- group = &idxd->groups[id];
+ group = idxd->groups[id];
prevg = wq->group;
if (prevg)
@@ -965,7 +929,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
int wq_size = 0;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
wq_size += wq->size;
}
@@ -1361,6 +1325,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
NULL,
};
+static void idxd_conf_wq_release(struct device *dev)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ kfree(wq->wqcfg);
+ kfree(wq);
+}
+
+struct device_type idxd_wq_device_type = {
+ .name = "wq",
+ .release = idxd_conf_wq_release,
+ .groups = idxd_wq_attribute_groups,
+};
+
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1485,7 +1463,7 @@ static ssize_t clients_show(struct device *dev,
spin_lock_irqsave(&idxd->dev_lock, flags);
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
count += wq->client_count;
}
@@ -1643,36 +1621,59 @@ static const struct attribute_group *idxd_attribute_groups[] = {
NULL,
};
+static void idxd_conf_device_release(struct device *dev)
+{
+ struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+
+ kfree(idxd->groups);
+ kfree(idxd->wqs);
+ kfree(idxd->engines);
+ kfree(idxd->irq_entries);
+ kfree(idxd);
+}
+
+struct device_type dsa_device_type = {
+ .name = "dsa",
+ .release = idxd_conf_device_release,
+ .groups = idxd_attribute_groups,
+};
+
+struct device_type iax_device_type = {
+ .name = "iax",
+ .release = idxd_conf_device_release,
+ .groups = idxd_attribute_groups,
+};
+
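
Each device_type now owns both the release() that frees its container and the attribute groups, so every object kind has a single place describing its sysfs layout and its deallocation, and device_add() creates the attribute files before the uevent fires. A hedged sketch of the shape, with a hypothetical struct bar:

static void bar_release(struct device *dev)
{
	kfree(container_of(dev, struct bar, conf_dev));
}

static struct device_type bar_device_type = {
	.name    = "bar",
	.release = bar_release,		 /* frees exactly what setup allocated */
	.groups  = bar_attribute_groups, /* created/removed with the device */
};
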
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
int i, rc;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
+ device_initialize(&engine->conf_dev);
engine->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&engine->conf_dev, "engine%d.%d",
- idxd->id, engine->id);
- engine->conf_dev.bus = idxd_get_bus_type(idxd);
- engine->conf_dev.groups = idxd_engine_attribute_groups;
engine->conf_dev.type = &idxd_engine_device_type;
+ engine->conf_dev.bus = idxd_get_bus_type(idxd);
+ rc = dev_set_name(&engine->conf_dev, "engine%d.%d",
+ idxd->id, engine->id);
+ if (rc < 0) {
+ put_device(&engine->conf_dev);
+ goto cleanup;
+ }
dev_dbg(dev, "Engine device register: %s\n",
dev_name(&engine->conf_dev));
- rc = device_register(&engine->conf_dev);
+ rc = device_add(&engine->conf_dev);
if (rc < 0) {
put_device(&engine->conf_dev);
goto cleanup;
}
}
return 0;
cleanup:
while (i--) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
- device_unregister(&engine->conf_dev);
+ put_device(&engine->conf_dev);
}
return rc;
}
@@ -1683,30 +1684,29 @@ static int idxd_setup_group_sysfs(struct idxd_device *idxd)
int i, rc;
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
+ device_initialize(&group->conf_dev);
group->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&group->conf_dev, "group%d.%d",
- idxd->id, group->id);
group->conf_dev.bus = idxd_get_bus_type(idxd);
- group->conf_dev.groups = idxd_group_attribute_groups;
group->conf_dev.type = &idxd_group_device_type;
+ rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
+ if (rc < 0) {
+ put_device(&group->conf_dev);
+ goto cleanup;
+ }
dev_dbg(dev, "Group device register: %s\n",
dev_name(&group->conf_dev));
- rc = device_register(&group->conf_dev);
+ rc = device_add(&group->conf_dev);
if (rc < 0) {
put_device(&group->conf_dev);
goto cleanup;
}
}
return 0;
cleanup:
while (i--) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
- device_unregister(&group->conf_dev);
+ put_device(&group->conf_dev);
}
return rc;
}
@@ -1717,29 +1717,29 @@ static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
int i, rc;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
+ device_initialize(&wq->conf_dev);
wq->conf_dev.parent = &idxd->conf_dev;
- dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
wq->conf_dev.bus = idxd_get_bus_type(idxd);
- wq->conf_dev.groups = idxd_wq_attribute_groups;
wq->conf_dev.type = &idxd_wq_device_type;
+ rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ if (rc < 0) {
+ put_device(&wq->conf_dev);
+ goto cleanup;
+ }
dev_dbg(dev, "WQ device register: %s\n",
dev_name(&wq->conf_dev));
- rc = device_register(&wq->conf_dev);
+ rc = device_add(&wq->conf_dev);
if (rc < 0) {
put_device(&wq->conf_dev);
goto cleanup;
}
}
return 0;
cleanup:
while (i--) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
- device_unregister(&wq->conf_dev);
+ put_device(&wq->conf_dev);
}
return rc;
}
@@ -1748,22 +1748,27 @@ static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
int rc;
- char devname[IDXD_NAME_SIZE];
- sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+ device_initialize(&idxd->conf_dev);
idxd->conf_dev.parent = dev;
- dev_set_name(&idxd->conf_dev, "%s", devname);
idxd->conf_dev.bus = idxd_get_bus_type(idxd);
- idxd->conf_dev.groups = idxd_attribute_groups;
idxd->conf_dev.type = idxd_get_device_type(idxd);
+ rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return rc;
+ }
dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
- rc = device_register(&idxd->conf_dev);
+ rc = device_add(&idxd->conf_dev);
if (rc < 0) {
put_device(&idxd->conf_dev);
return rc;
}
+ /* Hold an extra ref so idxd_remove() can drop the last one */
+ get_device(&idxd->conf_dev);
+
return 0;
}
@@ -1807,19 +1812,19 @@ void idxd_cleanup_sysfs(struct idxd_device *idxd)
int i;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
+ struct idxd_wq *wq = idxd->wqs[i];
device_unregister(&wq->conf_dev);
}
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = &idxd->engines[i];
+ struct idxd_engine *engine = idxd->engines[i];
device_unregister(&engine->conf_dev);
}
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = &idxd->groups[i];
+ struct idxd_group *group = idxd->groups[i];
device_unregister(&group->conf_dev);
}