@@ -13,6 +13,7 @@
#include <linux/msi.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/fsl/mc.h>
#include "../../include/dpaa2-io.h"
@@ -146,10 +147,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
* Set the CENA regs to be the cache inhibited area of the portal to
* avoid coherency issues if a user migrates to another core.
*/
- desc.regs_cena = ioremap_wc(dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]));
- desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]));
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]),
+ MEMREMAP_WC);
+ if (IS_ERR(desc.regs_cena)) {
+ dev_err(dev, "devm_memremap failed\n");
+ err = PTR_ERR(desc.regs_cena);
+ goto err_allocate_irqs;
+ }
+
+ desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]));
+ if (!desc.regs_cinh) {
+ dev_err(dev, "devm_ioremap failed\n");
+ err = -ENOMEM;
+ goto err_allocate_irqs;
+ }
err = fsl_mc_allocate_irqs(dpio_dev);
if (err) {
@@ -15,7 +15,7 @@ struct qbman_swp;
/* qbman software portal descriptor structure */
struct qbman_swp_desc {
void *cena_bar; /* Cache-enabled portal base address */
- void *cinh_bar; /* Cache-inhibited portal base address */
+ void __iomem *cinh_bar; /* Cache-inhibited portal base address */
u32 qman_version;
};
@@ -102,7 +102,7 @@ struct qbman_release_desc {
/* portal data structure */
struct qbman_swp {
const struct qbman_swp_desc *desc;
- void __iomem *addr_cena;
+ void *addr_cena;
void __iomem *addr_cinh;
/* Management commands */
@@ -52,7 +52,7 @@ struct dpaa2_io_desc {
int has_8prio;
int cpu;
void *regs_cena;
- void *regs_cinh;
+ void __iomem *regs_cinh;
int dpio_id;
u32 qman_version;
};
Change the mapping of the QBMan cache-enabled area from using
ioremap_wc() to devm_memremap(). This allows the __iomem attribute to
be removed from the pointer, which makes sense as accesses treat this
area as cacheable memory, not I/O memory. These changes allow the
sparse checks to pass. Also use devm_ioremap() for the cache-inhibited
area so that the unmapping happens automatically when the device is
released.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
---
 drivers/staging/fsl-mc/bus/dpio/dpio-driver.c  | 21 +++++++++++++++++----
 drivers/staging/fsl-mc/bus/dpio/qbman-portal.h |  4 ++--
 drivers/staging/fsl-mc/include/dpaa2-io.h      |  2 +-
 3 files changed, 20 insertions(+), 7 deletions(-)
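
For illustration only (not part of the patch): a minimal sketch of the
mapping pattern the change adopts, with hypothetical names
(example_map_portal, dev, base, size). It shows why the two pointers are
declared differently: the devm_memremap() mapping behaves like ordinary
cacheable memory and is a plain pointer, while the devm_ioremap() mapping
keeps the __iomem attribute and must go through the MMIO accessors that
sparse checks. Note also that devm_memremap() reports failure with an
ERR_PTR, whereas devm_ioremap() returns NULL, so the two error checks
differ.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Hypothetical helper, for illustration only: map one region as
 * write-combined "normal" memory and one as cache-inhibited I/O memory,
 * mirroring what the DPIO probe now does. Both mappings are device
 * managed, so they are released automatically when the device is unbound.
 */
static int example_map_portal(struct device *dev, phys_addr_t base,
			      resource_size_t size)
{
	void *cena;		/* plain pointer: ordinary loads/stores */
	void __iomem *cinh;	/* __iomem pointer: readl()/writel() only */

	/* devm_memremap() reports failure with an ERR_PTR, never NULL */
	cena = devm_memremap(dev, base, size, MEMREMAP_WC);
	if (IS_ERR(cena))
		return PTR_ERR(cena);

	/* devm_ioremap() returns NULL on failure */
	cinh = devm_ioremap(dev, base, size);
	if (!cinh)
		return -ENOMEM;

	memset(cena, 0, size);	/* illustrative access: cache-enabled side uses normal memory ops */
	writel(0, cinh);	/* illustrative access: cache-inhibited side uses MMIO accessors */

	return 0;
}

With this split, sparse is satisfied: addr_cena can be dereferenced or
passed to memcpy() as plain memory, while addr_cinh keeps the __iomem
annotation and is only touched through the MMIO accessors.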