@@ -77,7 +77,6 @@ struct iommu_table {
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
- void (*set_bypass)(struct iommu_table *tbl, bool enable);
};
/* Pure 2^n version of get_order */
@@ -56,6 +56,8 @@ struct spapr_tce_iommu_ops {
struct iommu_table *(*get_table)(
struct spapr_tce_iommu_group *data,
int num);
+ void (*take_ownership)(struct spapr_tce_iommu_group *data,
+ bool enable);
};
struct spapr_tce_iommu_group {
@@ -1139,14 +1139,6 @@ int iommu_take_ownership(struct iommu_table *tbl)
memset(tbl->it_map, 0xff, sz);
iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
- /*
- * Disable iommu bypass, otherwise the user can DMA to all of
- * our physical memory via the bypass window instead of just
- * the pages that has been explicitly mapped into the iommu
- */
- if (tbl->set_bypass)
- tbl->set_bypass(tbl, false);
-
return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1161,10 +1153,6 @@ void iommu_release_ownership(struct iommu_table *tbl)
/* Restore bit#0 set by iommu_init_table() */
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
-
- /* The kernel owns the device now, we can restore the iommu bypass */
- if (tbl->set_bypass)
- tbl->set_bypass(tbl, true);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
@@ -1105,10 +1105,8 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
-static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32.table);
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -1136,7 +1134,7 @@ static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
* host side.
*/
if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
+ set_iommu_table_base(&pe->pdev->dev, &pe->tce32.table);
else
pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
}
@@ -1152,15 +1150,21 @@ static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
/* TVE #1 is selected by PCI address bit 59 */
pe->tce_bypass_base = 1ull << 59;
- /* Install set_bypass callback for VFIO */
- pe->tce32.table.set_bypass = pnv_pci_ioda2_set_bypass;
-
/* Enable bypass by default */
- pnv_pci_ioda2_set_bypass(&pe->tce32.table, true);
+ pnv_pci_ioda2_set_bypass(pe, true);
+}
+
+static void pnv_ioda2_take_ownership(struct spapr_tce_iommu_group *data,
+ bool enable)
+{
+ struct pnv_ioda_pe *pe = data->iommu_owner;
+
+ pnv_pci_ioda2_set_bypass(pe, !enable);
}
static struct spapr_tce_iommu_ops pnv_pci_ioda2_ops = {
.get_table = pnv_ioda1_iommu_get_table,
+ .take_ownership = pnv_ioda2_take_ownership,
};
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
@@ -76,6 +76,13 @@ static struct iommu_table *spapr_tce_find_table(
return ret;
}
+static void tce_iommu_take_ownership_notify(struct spapr_tce_iommu_group *data,
+ bool enable)
+{
+ if (data && data->ops && data->ops->take_ownership)
+ data->ops->take_ownership(data, enable);
+}
+
static int tce_iommu_enable(struct tce_container *container)
{
int ret = 0;
@@ -413,6 +420,12 @@ static int tce_iommu_attach_group(void *iommu_data,
ret = iommu_take_ownership(tbl);
if (!ret)
container->grp = iommu_group;
+ /*
+ * Disable iommu bypass, otherwise the user can DMA to all of
+ * our physical memory via the bypass window instead of just
+ * the pages that have been explicitly mapped into the iommu
+ */
+ tce_iommu_take_ownership_notify(data, true);
}
mutex_unlock(&container->lock);
@@ -450,6 +463,9 @@ static void tce_iommu_detach_group(void *iommu_data,
BUG_ON(!tbl);
iommu_release_ownership(tbl);
+
+ /* The kernel owns the device now, so we can restore bypass */
+ tce_iommu_take_ownership_notify(data, false);
}
mutex_unlock(&container->lock);
}