@@ -226,7 +226,7 @@ int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
unsigned int *flush_flags);
int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
unsigned int *flush_flags);
-int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
+int __must_check amd_iommu_alloc_root(struct domain *d);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
paddr_t phys_addr, unsigned long size,
int iw, int ir);
@@ -356,22 +356,6 @@ static inline int amd_iommu_get_paging_mode(unsigned long max_frames)
return level;
}

-static inline struct page_info *alloc_amd_iommu_pgtable(void)
-{
- struct page_info *pg = alloc_domheap_page(NULL, 0);
-
- if ( pg )
- clear_domain_page(page_to_mfn(pg));
-
- return pg;
-}
-
-static inline void free_amd_iommu_pgtable(struct page_info *pg)
-{
- if ( pg )
- free_domheap_page(pg);
-}
-
static inline void *__alloc_amd_iommu_tables(unsigned int order)
{
return alloc_xenheap_pages(order, 0);
@@ -217,7 +217,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
mfn = next_table_mfn;

/* allocate lower level page table */
- table = alloc_amd_iommu_pgtable();
+ table = iommu_alloc_pgtable(d);
if ( table == NULL )
{
AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
@@ -248,7 +248,7 @@ static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,

if ( next_table_mfn == 0 )
{
- table = alloc_amd_iommu_pgtable();
+ table = iommu_alloc_pgtable(d);
if ( table == NULL )
{
AMD_IOMMU_DEBUG("Cannot allocate I/O page table\n");
@@ -286,7 +286,7 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,

spin_lock(&hd->arch.mapping_lock);

- rc = amd_iommu_alloc_root(hd);
+ rc = amd_iommu_alloc_root(d);
if ( rc )
{
spin_unlock(&hd->arch.mapping_lock);
@@ -458,7 +458,7 @@ int __init amd_iommu_quarantine_init(struct domain *d)

spin_lock(&hd->arch.mapping_lock);

- hd->arch.amd.root_table = alloc_amd_iommu_pgtable();
+ hd->arch.amd.root_table = iommu_alloc_pgtable(d);
if ( !hd->arch.amd.root_table )
goto out;

@@ -473,7 +473,7 @@ int __init amd_iommu_quarantine_init(struct domain *d)
* page table pages, and the resulting allocations are always
* zeroed.
*/
- pg = alloc_amd_iommu_pgtable();
+ pg = iommu_alloc_pgtable(d);
if ( !pg )
break;

@@ -205,11 +205,13 @@ static int iov_enable_xt(void)
return 0;
}

-int amd_iommu_alloc_root(struct domain_iommu *hd)
+int amd_iommu_alloc_root(struct domain *d)
{
+ struct domain_iommu *hd = dom_iommu(d);
+
if ( unlikely(!hd->arch.amd.root_table) )
{
- hd->arch.amd.root_table = alloc_amd_iommu_pgtable();
+ hd->arch.amd.root_table = iommu_alloc_pgtable(d);
if ( !hd->arch.amd.root_table )
return -ENOMEM;
}
@@ -217,12 +219,13 @@ int amd_iommu_alloc_root(struct domain_iommu *hd)
return 0;
}

-static int __must_check allocate_domain_resources(struct domain_iommu *hd)
+static int __must_check allocate_domain_resources(struct domain *d)
{
+ struct domain_iommu *hd = dom_iommu(d);
int rc;

spin_lock(&hd->arch.mapping_lock);
- rc = amd_iommu_alloc_root(hd);
+ rc = amd_iommu_alloc_root(d);
spin_unlock(&hd->arch.mapping_lock);

return rc;
@@ -254,7 +257,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
{
const struct amd_iommu *iommu;

- if ( allocate_domain_resources(dom_iommu(d)) )
+ if ( allocate_domain_resources(d) )
BUG();

for_each_amd_iommu ( iommu )
@@ -323,7 +326,6 @@ static int reassign_device(struct domain *source, struct domain *target,
{
struct amd_iommu *iommu;
int bdf, rc;
- struct domain_iommu *t = dom_iommu(target);

bdf = PCI_BDF2(pdev->bus, pdev->devfn);
iommu = find_iommu_for_device(pdev->seg, bdf);
@@ -344,7 +346,7 @@ static int reassign_device(struct domain *source, struct domain *target,
pdev->domain = target;
}

- rc = allocate_domain_resources(t);
+ rc = allocate_domain_resources(target);
if ( rc )
return rc;

@@ -376,65 +378,9 @@ static int amd_iommu_assign_device(struct domain *d, u8 devfn,
return reassign_device(pdev->domain, d, devfn, pdev);
}

-static void deallocate_next_page_table(struct page_info *pg, int level)
-{
- PFN_ORDER(pg) = level;
- spin_lock(&iommu_pt_cleanup_lock);
- page_list_add_tail(pg, &iommu_pt_cleanup_list);
- spin_unlock(&iommu_pt_cleanup_lock);
-}
-
-static void deallocate_page_table(struct page_info *pg)
-{
- struct amd_iommu_pte *table_vaddr;
- unsigned int index, level = PFN_ORDER(pg);
-
- PFN_ORDER(pg) = 0;
-
- if ( level <= 1 )
- {
- free_amd_iommu_pgtable(pg);
- return;
- }
-
- table_vaddr = __map_domain_page(pg);
-
- for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
- {
- struct amd_iommu_pte *pde = &table_vaddr[index];
-
- if ( pde->mfn && pde->next_level && pde->pr )
- {
- /* We do not support skip levels yet */
- ASSERT(pde->next_level == level - 1);
- deallocate_next_page_table(mfn_to_page(_mfn(pde->mfn)),
- pde->next_level);
- }
- }
-
- unmap_domain_page(table_vaddr);
- free_amd_iommu_pgtable(pg);
-}
-
-static void deallocate_iommu_page_tables(struct domain *d)
-{
- struct domain_iommu *hd = dom_iommu(d);
-
- spin_lock(&hd->arch.mapping_lock);
- if ( hd->arch.amd.root_table )
- {
- deallocate_next_page_table(hd->arch.amd.root_table,
- hd->arch.amd.paging_mode);
- hd->arch.amd.root_table = NULL;
- }
- spin_unlock(&hd->arch.mapping_lock);
-}
-
-
static void amd_iommu_domain_destroy(struct domain *d)
{
- deallocate_iommu_page_tables(d);
- amd_iommu_flush_all_pages(d);
+ dom_iommu(d)->arch.amd.root_table = NULL;
}

static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
@@ -620,7 +566,6 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
.unmap_page = amd_iommu_unmap_page,
.iotlb_flush = amd_iommu_flush_iotlb_pages,
.iotlb_flush_all = amd_iommu_flush_iotlb_all,
- .free_page_table = deallocate_page_table,
.reassign_device = reassign_device,
.get_device_group_id = amd_iommu_group_id,
.enable_x2apic = iov_enable_xt,