Message ID | 20250309084110.458224773@linutronix.de (mailing list archive) |
---|---|
State | New |
Delegated to: | Bjorn Helgaas |
Headers | show |
Series | genirq/msi: Spring cleaning | expand |
On Sun, 9 Mar 2025 09:41:49 +0100 (CET) Thomas Gleixner <tglx@linutronix.de> wrote: > Convert the code to use the new guard(msi_descs_lock). > > No functional change intended. > > Signed-off-by: Thomas Gleixner <tglx@linutronix.de> > Cc: Bjorn Helgaas <bhelgaas@google.com> > Cc: linux-pci@vger.kernel.org This one runs into some of the stuff that the docs say to not do. I don't there there are bugs in here as such but it gets harder to reason about and fragile in the long run. Easy enough to avoid though - see inline. Jonathan > --- a/drivers/pci/msi/msi.c > +++ b/drivers/pci/msi/msi.c > @@ -351,7 +351,7 @@ static int msi_verify_entries(struct pci > static int msi_capability_init(struct pci_dev *dev, int nvec, > struct irq_affinity *affd) > { > - struct irq_affinity_desc *masks = NULL; > + struct irq_affinity_desc *masks __free(kfree) = NULL; This is a pattern that the cleanup.h docs talk about that Linus really didn't like in some of the early patches because of wanting constructors and destructors together. Maybe use an inline definition as struct irq_affinity_desc *masks = affd ? irq_create_affinity_masks(nvec, affd) : NULL; > struct msi_desc *entry, desc; > int ret; > > @@ -369,7 +369,7 @@ static int msi_capability_init(struct pc > if (affd) > masks = irq_create_affinity_masks(nvec, affd); > > - msi_lock_descs(&dev->dev); > + guard(msi_descs_lock)(&dev->dev); > ret = msi_setup_msi_desc(dev, nvec, masks); > if (ret) > goto fail; This runs against the advice in cleanup.h. It is probably correct and doesn't hit the undefined paths that motivated that guidance, but still maybe worth avoiding the mix of cleanup and goto handling. Easiest way being to factor out the code after guard to a helper. 
> @@ -399,16 +399,13 @@ static int msi_capability_init(struct pc > > pcibios_free_irq(dev); > dev->irq = entry->irq; > - goto unlock; > + return 0; > > err: > pci_msi_unmask(&desc, msi_multi_mask(&desc)); > pci_free_msi_irqs(dev); > fail: > dev->msi_enabled = 0; > -unlock: > - msi_unlock_descs(&dev->dev); > - kfree(masks); > return ret; > } > > @@ -669,13 +666,13 @@ static void msix_mask_all(void __iomem * > static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, > int nvec, struct irq_affinity *affd) > { > - struct irq_affinity_desc *masks = NULL; > + struct irq_affinity_desc *masks __free(kfree) = NULL; Similar to above. > int ret; > > if (affd) > masks = irq_create_affinity_masks(nvec, affd); > > - msi_lock_descs(&dev->dev); > + guard(msi_descs_lock)(&dev->dev); > ret = msix_setup_msi_descs(dev, entries, nvec, masks); > if (ret) > goto out_free; > @@ -690,13 +687,10 @@ static int msix_setup_interrupts(struct > goto out_free; > > msix_update_entries(dev, entries); > - goto out_unlock; > + return 0; > > out_free: Here as well. > pci_free_msi_irqs(dev); > -out_unlock: > - msi_unlock_descs(&dev->dev); > - kfree(masks); > return ret; > } > > @@ -871,13 +865,13 @@ void __pci_restore_msix_state(struct pci > > write_msg = arch_restore_msi_irqs(dev); > > - msi_lock_descs(&dev->dev); > - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { > - if (write_msg) > - __pci_write_msi_msg(entry, &entry->msg); > - pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); > + scoped_guard (msi_descs_lock, &dev->dev) { > + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { > + if (write_msg) > + __pci_write_msi_msg(entry, &entry->msg); > + pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); > + } > } > - msi_unlock_descs(&dev->dev); > > pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); > } > > >
On Tue, Mar 11 2025 at 18:10, Jonathan Cameron wrote: > On Sun, 9 Mar 2025 09:41:49 +0100 (CET) > Thomas Gleixner <tglx@linutronix.de> wrote: > > This one runs into some of the stuff that the docs say > to not do. I don't there there are bugs in here as such > but it gets harder to reason about and fragile in the long > run. Easy enough to avoid though - see inline. Right. Updated variant below. Thanks, tglx --- --- a/drivers/pci/msi/api.c +++ b/drivers/pci/msi/api.c @@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev if (!pci_msi_enabled() || !dev || !dev->msi_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msi_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msi); @@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *de if (!pci_msi_enabled() || !dev || !dev->msix_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msix_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msix); --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -336,41 +336,11 @@ static int msi_verify_entries(struct pci return !entry ? 0 : -EIO; } -/** - * msi_capability_init - configure device's MSI capability structure - * @dev: pointer to the pci_dev data structure of MSI device function - * @nvec: number of interrupts to allocate - * @affd: description of automatic IRQ affinity assignments (may be %NULL) - * - * Setup the MSI capability structure of the device with the requested - * number of interrupts. A return value of zero indicates the successful - * setup of an entry with the new MSI IRQ. A negative return value indicates - * an error, and a positive return value indicates the number of interrupts - * which could have been allocated. 
- */ -static int msi_capability_init(struct pci_dev *dev, int nvec, - struct irq_affinity *affd) +static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks) { - struct irq_affinity_desc *masks = NULL; + int ret = msi_setup_msi_desc(dev, nvec, masks); struct msi_desc *entry, desc; - int ret; - - /* Reject multi-MSI early on irq domain enabled architectures */ - if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY)) - return 1; - - /* - * Disable MSI during setup in the hardware, but mark it enabled - * so that setup code can evaluate it. - */ - pci_msi_set_enable(dev, 0); - dev->msi_enabled = 1; - - if (affd) - masks = irq_create_affinity_masks(nvec, affd); - msi_lock_descs(&dev->dev); - ret = msi_setup_msi_desc(dev, nvec, masks); if (ret) goto fail; @@ -399,19 +369,48 @@ static int msi_capability_init(struct pc pcibios_free_irq(dev); dev->irq = entry->irq; - goto unlock; - + return 0; err: pci_msi_unmask(&desc, msi_multi_mask(&desc)); pci_free_msi_irqs(dev); fail: dev->msi_enabled = 0; -unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); return ret; } +/** + * msi_capability_init - configure device's MSI capability structure + * @dev: pointer to the pci_dev data structure of MSI device function + * @nvec: number of interrupts to allocate + * @affd: description of automatic IRQ affinity assignments (may be %NULL) + * + * Setup the MSI capability structure of the device with the requested + * number of interrupts. A return value of zero indicates the successful + * setup of an entry with the new MSI IRQ. A negative return value indicates + * an error, and a positive return value indicates the number of interrupts + * which could have been allocated. 
+ */ +static int msi_capability_init(struct pci_dev *dev, int nvec, + struct irq_affinity *affd) +{ + /* Reject multi-MSI early on irq domain enabled architectures */ + if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY)) + return 1; + + /* + * Disable MSI during setup in the hardware, but mark it enabled + * so that setup code can evaluate it. + */ + pci_msi_set_enable(dev, 0); + dev->msi_enabled = 1; + + struct irq_affinity_desc *masks __free(kfree) = + affd ? irq_create_affinity_masks(nvec, affd) : NULL; + + guard(msi_descs_lock)(&dev->dev); + return __msi_capability_init(dev, nvec, masks); +} + int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { @@ -666,37 +665,37 @@ static void msix_mask_all(void __iomem * writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); } -static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, - int nvec, struct irq_affinity *affd) +static int __msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, + int nvec, struct irq_affinity_desc *masks) { - struct irq_affinity_desc *masks = NULL; - int ret; + int ret = msix_setup_msi_descs(dev, entries, nvec, masks); - if (affd) - masks = irq_create_affinity_masks(nvec, affd); - - msi_lock_descs(&dev->dev); - ret = msix_setup_msi_descs(dev, entries, nvec, masks); if (ret) - goto out_free; + return ret; ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); if (ret) - goto out_free; + return ret; /* Check if all MSI entries honor device restrictions */ ret = msi_verify_entries(dev); if (ret) - goto out_free; + return ret; msix_update_entries(dev, entries); - goto out_unlock; + return 0; +} -out_free: - pci_free_msi_irqs(dev); -out_unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); +static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, + int nvec, struct irq_affinity *affd) +{ + struct irq_affinity_desc *masks __free(kfree) = + affd ? 
irq_create_affinity_masks(nvec, affd) : NULL; + + guard(msi_descs_lock)(&dev->dev); + int ret = __msix_setup_interrupts(dev, entries, nvec, masks); + if (ret) + pci_free_msi_irqs(dev); return ret; } @@ -871,13 +870,13 @@ void __pci_restore_msix_state(struct pci write_msg = arch_restore_msi_irqs(dev); - msi_lock_descs(&dev->dev); - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { - if (write_msg) - __pci_write_msi_msg(entry, &entry->msg); - pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + scoped_guard (msi_descs_lock, &dev->dev) { + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + if (write_msg) + __pci_write_msi_msg(entry, &entry->msg); + pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + } } - msi_unlock_descs(&dev->dev); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); }
On Tue, 11 Mar 2025 22:45:44 +0100 Thomas Gleixner <tglx@linutronix.de> wrote: > On Tue, Mar 11 2025 at 18:10, Jonathan Cameron wrote: > > On Sun, 9 Mar 2025 09:41:49 +0100 (CET) > > Thomas Gleixner <tglx@linutronix.de> wrote: > > > > This one runs into some of the stuff that the docs say > > to not do. I don't think there are bugs in here as such > > but it gets harder to reason about and fragile in the long > > run. Easy enough to avoid though - see inline. > > Right. Updated variant below. LGTM Thanks, Jonathan
--- a/drivers/pci/msi/api.c +++ b/drivers/pci/msi/api.c @@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev if (!pci_msi_enabled() || !dev || !dev->msi_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msi_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msi); @@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *de if (!pci_msi_enabled() || !dev || !dev->msix_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msix_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msix); --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -351,7 +351,7 @@ static int msi_verify_entries(struct pci static int msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity *affd) { - struct irq_affinity_desc *masks = NULL; + struct irq_affinity_desc *masks __free(kfree) = NULL; struct msi_desc *entry, desc; int ret; @@ -369,7 +369,7 @@ static int msi_capability_init(struct pc if (affd) masks = irq_create_affinity_masks(nvec, affd); - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); ret = msi_setup_msi_desc(dev, nvec, masks); if (ret) goto fail; @@ -399,16 +399,13 @@ static int msi_capability_init(struct pc pcibios_free_irq(dev); dev->irq = entry->irq; - goto unlock; + return 0; err: pci_msi_unmask(&desc, msi_multi_mask(&desc)); pci_free_msi_irqs(dev); fail: dev->msi_enabled = 0; -unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); return ret; } @@ -669,13 +666,13 @@ static void msix_mask_all(void __iomem * static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity *affd) { - struct irq_affinity_desc *masks = NULL; + struct irq_affinity_desc *masks __free(kfree) = NULL; int ret; if (affd) masks = irq_create_affinity_masks(nvec, affd); - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); ret = 
msix_setup_msi_descs(dev, entries, nvec, masks); if (ret) goto out_free; @@ -690,13 +687,10 @@ static int msix_setup_interrupts(struct goto out_free; msix_update_entries(dev, entries); - goto out_unlock; + return 0; out_free: pci_free_msi_irqs(dev); -out_unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); return ret; } @@ -871,13 +865,13 @@ void __pci_restore_msix_state(struct pci write_msg = arch_restore_msi_irqs(dev); - msi_lock_descs(&dev->dev); - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { - if (write_msg) - __pci_write_msi_msg(entry, &entry->msg); - pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + scoped_guard (msi_descs_lock, &dev->dev) { + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + if (write_msg) + __pci_write_msi_msg(entry, &entry->msg); + pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + } } - msi_unlock_descs(&dev->dev); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); }
Convert the code to use the new guard(msi_descs_lock). No functional change intended. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Bjorn Helgaas <bhelgaas@google.com> Cc: linux-pci@vger.kernel.org --- drivers/pci/msi/api.c | 6 ++---- drivers/pci/msi/msi.c | 30 ++++++++++++------------------ 2 files changed, 14 insertions(+), 22 deletions(-)