@@ -18,14 +18,14 @@
/*
* Internal data structure containing a (made up, but unique) devid
- * and the callback to write the MSI message.
+ * and the platform-msi ops used to operate on the MSIs (message write etc.)
*/
struct platform_msi_priv_data {
- struct device *dev;
- void *host_data;
- msi_alloc_info_t arg;
- irq_write_msi_msg_t write_msg;
- int devid;
+ struct device *dev;
+ void *host_data;
+ msi_alloc_info_t arg;
+ const struct platform_msi_ops *ops;
+ int devid;
};
/* The devid allocator */
@@ -83,7 +83,7 @@ static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
priv_data = desc->platform.msi_priv_data;
- priv_data->write_msg(desc, msg);
+ priv_data->ops->write_msg(desc, msg);
}
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
@@ -194,16 +194,17 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
static struct platform_msi_priv_data *
platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+ const struct platform_msi_ops *platform_ops)
{
struct platform_msi_priv_data *datap;
+
/*
* Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
*/
- if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
+ if (!dev->msi_domain || !platform_ops->write_msg || !nvec || nvec > MAX_DEV_MSIS)
return ERR_PTR(-EINVAL);
if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
@@ -227,7 +228,7 @@ platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
return ERR_PTR(err);
}
- datap->write_msg = write_msi_msg;
+ datap->ops = platform_ops;
datap->dev = dev;
return datap;
@@ -249,12 +250,12 @@ static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
* Zero for success, or an error code in case of failure
*/
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+ const struct platform_msi_ops *platform_ops)
{
struct platform_msi_priv_data *priv_data;
int err;
- priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ priv_data = platform_msi_alloc_priv_data(dev, nvec, platform_ops);
if (IS_ERR(priv_data))
return PTR_ERR(priv_data);
@@ -324,7 +325,7 @@ struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
unsigned int nvec,
bool is_tree,
- irq_write_msi_msg_t write_msi_msg,
+ const struct platform_msi_ops *platform_ops,
const struct irq_domain_ops *ops,
void *host_data)
{
@@ -332,7 +333,7 @@ __platform_msi_create_device_domain(struct device *dev,
struct irq_domain *domain;
int err;
- data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ data = platform_msi_alloc_priv_data(dev, nvec, platform_ops);
if (IS_ERR(data))
return NULL;
@@ -710,6 +710,10 @@ static int mv_xor_v2_resume(struct platform_device *dev)
return 0;
}
+static const struct platform_msi_ops mv_xor_v2_msi_ops = {
+ .write_msg = mv_xor_v2_set_msi_msg,
+};
+
static int mv_xor_v2_probe(struct platform_device *pdev)
{
struct mv_xor_v2_device *xor_dev;
@@ -765,7 +769,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
}
ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
- mv_xor_v2_set_msi_msg);
+ &mv_xor_v2_msi_ops);
if (ret)
goto disable_clk;
@@ -678,6 +678,10 @@ static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
writel(msg->data, dmadev->dev_evca + 0x120);
}
}
+
+static const struct platform_msi_ops hidma_msi_ops = {
+ .write_msg = hidma_write_msi_msg,
+};
#endif
static void hidma_free_msis(struct hidma_dev *dmadev)
@@ -703,7 +707,7 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
struct msi_desc *failed_desc = NULL;
rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
- hidma_write_msi_msg);
+ &hidma_msi_ops);
if (rc)
return rc;
@@ -3410,6 +3410,10 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
+static const struct platform_msi_ops arm_smmu_msi_ops = {
+ .write_msg = arm_smmu_write_msi_msg,
+};
+
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
struct msi_desc *desc;
@@ -3434,7 +3438,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
}
/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
- ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+ ret = platform_msi_domain_alloc_irqs(dev, nvec, &arm_smmu_msi_ops);
if (ret) {
dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
return;
@@ -232,6 +232,10 @@ static const struct irq_domain_ops mbigen_domain_ops = {
.free = mbigen_irq_domain_free,
};
+static const struct platform_msi_ops mbigen_msi_ops = {
+ .write_msg = mbigen_write_msg,
+};
+
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
@@ -260,7 +264,7 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
}
domain = platform_msi_create_device_domain(&child->dev, num_pins,
- mbigen_write_msg,
+ &mbigen_msi_ops,
&mbigen_domain_ops,
mgn_chip);
if (!domain) {
@@ -308,7 +312,7 @@ static int mbigen_acpi_create_domain(struct platform_device *pdev,
return -EINVAL;
domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
- mbigen_write_msg,
+ &mbigen_msi_ops,
&mbigen_domain_ops,
mgn_chip);
if (!domain)
@@ -295,6 +295,10 @@ static const struct of_device_id mvebu_icu_subset_of_match[] = {
{},
};
+static const struct platform_msi_ops mvebu_icu_msi_ops = {
+ .write_msg = mvebu_icu_write_msg,
+};
+
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
struct mvebu_icu_msi_data *msi_data;
@@ -324,7 +328,7 @@ static int mvebu_icu_subset_probe(struct platform_device *pdev)
return -ENODEV;
irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
- mvebu_icu_write_msg,
+ &mvebu_icu_msi_ops,
&mvebu_icu_domain_ops,
msi_data);
if (!irq_domain) {
@@ -1492,6 +1492,10 @@ static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}
+static const struct platform_msi_ops flexrm_mbox_msi_ops = {
+ .write_msg = flexrm_mbox_msi_write,
+};
+
static int flexrm_mbox_probe(struct platform_device *pdev)
{
int index, ret = 0;
@@ -1604,7 +1608,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
/* Allocate platform MSIs for each ring */
ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
- flexrm_mbox_msi_write);
+ &flexrm_mbox_msi_ops);
if (ret)
goto fail_destroy_cmpl_pool;
@@ -652,6 +652,10 @@ static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}
+static const struct platform_msi_ops smmu_pmu_msi_ops = {
+ .write_msg = smmu_pmu_write_msi_msg,
+};
+
static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
struct msi_desc *desc;
@@ -665,7 +669,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
return;
- ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
+ ret = platform_msi_domain_alloc_irqs(dev, 1, &smmu_pmu_msi_ops);
if (ret) {
dev_warn(dev, "failed to allocate MSIs\n");
return;
@@ -321,6 +321,14 @@ enum {
MSI_FLAG_LEVEL_CAPABLE = (1 << 6),
};
+/**
+ * struct platform_msi_ops - callbacks for platform MSI interrupt controllers
+ * @write_msg: write the MSI message into the device's registers
+ */
+struct platform_msi_ops {
+ irq_write_msi_msg_t write_msg;
+};
+
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force);
@@ -336,7 +344,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg);
+ const struct platform_msi_ops *platform_ops);
void platform_msi_domain_free_irqs(struct device *dev);
/* When an MSI domain is used as an intermediate domain */
@@ -348,14 +356,14 @@ struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
unsigned int nvec,
bool is_tree,
- irq_write_msi_msg_t write_msi_msg,
+ const struct platform_msi_ops *platform_ops,
const struct irq_domain_ops *ops,
void *host_data);
-#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
- __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
-#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
- __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
+#define platform_msi_create_device_domain(dev, nvec, p_ops, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, false, p_ops, ops, data)
+#define platform_msi_create_device_tree_domain(dev, nvec, p_ops, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, true, p_ops, ops, data)
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);