@@ -77,7 +77,6 @@ struct iommu_table {
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
- void (*set_bypass)(struct iommu_table *tbl, bool enable);
};
/* Pure 2^n version of get_order */
@@ -58,6 +58,8 @@ struct spapr_tce_iommu_ops {
struct iommu_table *(*get_table)(
struct spapr_tce_iommu_group *data,
phys_addr_t addr);
+ void (*take_ownership)(struct spapr_tce_iommu_group *data,
+ bool enable);
};
struct spapr_tce_iommu_group {
@@ -1116,14 +1116,6 @@ int iommu_take_ownership(struct iommu_table *tbl)
memset(tbl->it_map, 0xff, sz);
iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
- /*
- * Disable iommu bypass, otherwise the user can DMA to all of
- * our physical memory via the bypass window instead of just
- * the pages that has been explicitly mapped into the iommu
- */
- if (tbl->set_bypass)
- tbl->set_bypass(tbl, false);
-
return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1138,10 +1130,6 @@ void iommu_release_ownership(struct iommu_table *tbl)
/* Restore bit#0 set by iommu_init_table() */
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
-
- /* The kernel owns the device now, we can restore the iommu bypass */
- if (tbl->set_bypass)
- tbl->set_bypass(tbl, true);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
@@ -709,10 +709,8 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
-static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32.table);
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -752,15 +750,21 @@ static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
/* TVE #1 is selected by PCI address bit 59 */
pe->tce_bypass_base = 1ull << 59;
- /* Install set_bypass callback for VFIO */
- pe->tce32.table.set_bypass = pnv_pci_ioda2_set_bypass;
-
/* Enable bypass by default */
- pnv_pci_ioda2_set_bypass(&pe->tce32.table, true);
+ pnv_pci_ioda2_set_bypass(pe, true);
+}
+
+static void pnv_ioda2_take_ownership(struct spapr_tce_iommu_group *data,
+ bool enable)
+{
+ struct pnv_ioda_pe *pe = data->iommu_owner;
+
+ pnv_pci_ioda2_set_bypass(pe, !enable);
}
static struct spapr_tce_iommu_ops pnv_pci_ioda2_ops = {
.get_table = pnv_ioda1_iommu_get_table,
+ .take_ownership = pnv_ioda2_take_ownership,
};
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
@@ -47,6 +47,14 @@ struct tce_container {
bool enabled;
};
+
+static void tce_iommu_take_ownership_notify(struct spapr_tce_iommu_group *data,
+ bool enable)
+{
+ if (data && data->ops && data->ops->take_ownership)
+ data->ops->take_ownership(data, enable);
+}
+
static int tce_iommu_enable(struct tce_container *container)
{
int ret = 0;
@@ -367,6 +375,12 @@ static int tce_iommu_attach_group(void *iommu_data,
ret = iommu_take_ownership(tbl);
if (!ret)
container->grp = iommu_group;
+ /*
+ * Disable iommu bypass, otherwise the user can DMA to all of
+ * our physical memory via the bypass window instead of just
+ * the pages that have been explicitly mapped into the iommu
+ */
+ tce_iommu_take_ownership_notify(data, true);
}
mutex_unlock(&container->lock);
@@ -404,6 +418,9 @@ static void tce_iommu_detach_group(void *iommu_data,
BUG_ON(!tbl);
iommu_release_ownership(tbl);
+
+ /* Kernel owns the device now, we can restore bypass */
+ tce_iommu_take_ownership_notify(data, false);
}
mutex_unlock(&container->lock);
}