@@ -717,6 +717,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_version_detect(pci);
+ ret = dw_pcie_map_detect(pci);
+ if (ret)
+ return ret;
+
dw_pcie_iatu_detect(pci);
ep->ib_window_map = devm_kcalloc(dev,
@@ -409,6 +409,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
dw_pcie_version_detect(pci);
+ ret = dw_pcie_map_detect(pci);
+ if (ret)
+ goto err_free_msi;
+
dw_pcie_iatu_detect(pci);
ret = dw_pcie_setup_rc(pp);
@@ -213,7 +213,7 @@ static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
{
void __iomem *base = pci->atu_base;
- if (pci->iatu_unroll_enabled)
+ if (pci->iatu_dma_unrolled)
base += PCIE_ATU_UNROLL_BASE(dir, index);
else
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
@@ -579,24 +579,56 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
}
-static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+int dw_pcie_map_detect(struct dw_pcie *pci)
{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ struct resource *res;
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
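+	/* An all-ones viewport readback means the unrolled CSR space is used */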
if (val == 0xffffffff)
- return true;
+ pci->iatu_dma_unrolled = true;
+ else
+ pci->iatu_dma_unrolled = false;
+
+ if (!pci->iatu_dma_unrolled) {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+
+ dev_info(pci->dev, "iATU/DMA unroll: disabled\n");
+
+ return 0;
+ }
- return false;
+ if (!pci->atu_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res) {
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+
+ pci->atu_size = resource_size(res);
+ } else {
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ }
+
+ /* Pick a minimal default, enough for 8 in and 8 out windows */
+ if (!pci->atu_size)
+ pci->atu_size = SZ_4K;
+
+ dev_info(pci->dev, "iATU/DMA unroll: enabled\n");
+
+ return 0;
}
-static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
int max_region, ob, ib;
u32 val, min, dir;
u64 max;
- if (pci->iatu_unroll_enabled) {
+ if (pci->iatu_dma_unrolled) {
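+		/* Each iATU region slot takes 512 bytes of the unrolled CSR space */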
max_region = min((int)pci->atu_size / 512, 256);
} else {
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
@@ -640,38 +672,6 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
pci->num_ib_windows = ib;
pci->region_align = 1 << fls(min);
pci->region_limit = (max << 32) | (SZ_4G - 1);
-}
-
-void dw_pcie_iatu_detect(struct dw_pcie *pci)
-{
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
- if (pci->iatu_unroll_enabled) {
- if (!pci->atu_base) {
- struct resource *res =
- platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
- if (res) {
- pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(dev, res);
- }
- if (!pci->atu_base || IS_ERR(pci->atu_base))
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
- }
-
- if (!pci->atu_size)
- /* Pick a minimal default, enough for 8 in and 8 out windows */
- pci->atu_size = SZ_4K;
- } else {
- pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
- pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
- }
-
- dw_pcie_iatu_detect_regions(pci);
-
- dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
- "enabled" : "disabled");
dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
pci->num_ob_windows, pci->num_ib_windows,
@@ -310,7 +310,7 @@ struct dw_pcie {
int num_lanes;
int link_gen;
u8 n_fts[2];
- bool iatu_unroll_enabled: 1;
+ bool iatu_dma_unrolled: 1;
bool io_cfg_atu_shared: 1;
};
@@ -343,6 +343,7 @@ int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
int type, u64 cpu_addr, u8 bar);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
+int dw_pcie_map_detect(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
The iATU and eDMA are accessible over the same AXI/DBI interface and share
the same access mode (viewport-based or unrolled). Because of that it is
more suitable to perform the iATU and eDMA CSR space detection in a single
coherent function. Since the iATU CSR space and access mode detection
procedure is already available in the driver, let's move it into a
dedicated method. The eDMA CSR space detection will be added there in the
next commit. This change is a preparation for adding eDMA support to the
DW PCIe controller driver.

Note the new dw_pcie_map_detect() method fails if IO-remapping of the iATU
MMIO region fails. It should have been done this way in the first place,
since failing to remap the detected register space is certainly an error.

Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
---
Changelog v2:
- This is a new patch added in v2. (@Manivannan)
---
 .../pci/controller/dwc/pcie-designware-ep.c   |  4 +
 .../pci/controller/dwc/pcie-designware-host.c |  4 +
 drivers/pci/controller/dwc/pcie-designware.c  | 76 +++++++++----------
 drivers/pci/controller/dwc/pcie-designware.h  |  3 +-
 4 files changed, 48 insertions(+), 39 deletions(-)
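
For reference, the detection sequence as seen from the core-layer callers
after this change looks roughly as follows. This is only an illustrative
sketch: the example_dwc_detect() wrapper does not exist in the driver, and
the real callers are the dw_pcie_host_init() and dw_pcie_ep_init() paths
patched above, each with its own error-handling path.

static int example_dwc_detect(struct dw_pcie *pci)
{
	int ret;

	dw_pcie_version_detect(pci);

	/* Detect the iATU (and, in the next commit, eDMA) CSR space first */
	ret = dw_pcie_map_detect(pci);
	if (ret)
		return ret;

	/* Then enumerate the available inbound/outbound iATU windows */
	dw_pcie_iatu_detect(pci);

	return 0;
}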