@@ -54,7 +54,6 @@ static inline void check_dmar_capabilities(struct intel_iommu *a,
CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, vcs, ECAP_VCS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
@@ -101,7 +100,6 @@ static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, vcs, ECAP_VCS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
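
[Aside, not part of the patch: the CHECK_FEATURE_MISMATCH*() lines above all follow one pattern: compare a single capability bit between two IOMMU units and, on disagreement, clear that feature from a system-wide sanity mask so only commonly supported features get used. Below is a minimal standalone C sketch of that pattern; the macro, names, and mask value are invented for illustration and are not the kernel's definitions.]

/* mismatch_demo.c - standalone sketch, not kernel code */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SLADS_MASK (1ULL << 45)

/* Invented stand-in for CHECK_FEATURE_MISMATCH(): drop a feature
 * from the usable set when two units disagree on its ecap bit. */
#define DEMO_CHECK_MISMATCH(ea, eb, mask, sanity) \
do { \
	if (((ea) & (mask)) != ((eb) & (mask))) \
		*(sanity) &= ~(uint64_t)(mask); \
} while (0)

int main(void)
{
	uint64_t ecap_a = DEMO_SLADS_MASK;	/* unit A advertises SLADS */
	uint64_t ecap_b = 0;			/* unit B does not */
	uint64_t sanity = ~0ULL;		/* everything usable at first */

	DEMO_CHECK_MISMATCH(ecap_a, ecap_b, DEMO_SLADS_MASK, &sanity);
	printf("slads usable: %s\n",
	       (sanity & DEMO_SLADS_MASK) ? "yes" : "no");
	return 0;
}
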
@@ -989,8 +989,6 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
warn_invalid_dmar(phys_addr, " returns all ones");
goto unmap;
}
- if (ecap_vcs(iommu->ecap))
- iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);

/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -1721,9 +1721,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
}
- if (vccap_pasid(iommu->vccap))
- ioasid_unregister_allocator(&iommu->pasid_allocator);
-
#endif
}

@@ -2793,85 +2790,6 @@ static int copy_translation_tables(struct intel_iommu *iommu)
return ret;
}

-#ifdef CONFIG_INTEL_IOMMU_SVM
-static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
-{
- struct intel_iommu *iommu = data;
- ioasid_t ioasid;
-
- if (!iommu)
- return INVALID_IOASID;
- /*
- * VT-d virtual command interface always uses the full 20 bit
- * PASID range. Host can partition guest PASID range based on
- * policies but it is out of guest's control.
- */
- if (min < PASID_MIN || max > intel_pasid_max_id)
- return INVALID_IOASID;
-
- if (vcmd_alloc_pasid(iommu, &ioasid))
- return INVALID_IOASID;
-
- return ioasid;
-}
-
-static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
-{
- struct intel_iommu *iommu = data;
-
- if (!iommu)
- return;
- /*
- * Sanity check the ioasid owner is done at upper layer, e.g. VFIO
- * We can only free the PASID when all the devices are unbound.
- */
- if (ioasid_find(NULL, ioasid, NULL)) {
- pr_alert("Cannot free active IOASID %d\n", ioasid);
- return;
- }
- vcmd_free_pasid(iommu, ioasid);
-}
-
-static void register_pasid_allocator(struct intel_iommu *iommu)
-{
- /*
- * If we are running in the host, no need for custom allocator
- * in that PASIDs are allocated from the host system-wide.
- */
- if (!cap_caching_mode(iommu->cap))
- return;
-
- if (!sm_supported(iommu)) {
- pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
- return;
- }
-
- /*
- * Register a custom PASID allocator if we are running in a guest,
- * guest PASID must be obtained via virtual command interface.
- * There can be multiple vIOMMUs in each guest but only one allocator
- * is active. All vIOMMU allocators will eventually be calling the same
- * host allocator.
- */
- if (!vccap_pasid(iommu->vccap))
- return;
-
- pr_info("Register custom PASID allocator\n");
- iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
- iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
- iommu->pasid_allocator.pdata = (void *)iommu;
- if (ioasid_register_allocator(&iommu->pasid_allocator)) {
- pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
- /*
- * Disable scalable mode on this IOMMU if there
- * is no custom allocator. Mixing SM capable vIOMMU
- * and non-SM vIOMMU are not supported.
- */
- intel_iommu_sm = 0;
- }
-}
-#endif
-
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
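
[Aside, not part of the patch: the intel_vcmd_ioasid_alloc()/intel_vcmd_ioasid_free() pair deleted above enforced a simple contract: allocations must fall inside the supported PASID range, and a PASID may only be freed once nothing is still bound to it. Below is a self-contained C model of that contract; everything here, from the pool size to the names, is invented for illustration.]

/* pasid_demo.c - standalone model, not kernel code */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_INVALID_PASID 0xFFFFFFFFu
#define DEMO_PASID_MIN     1u
#define DEMO_PASID_MAX     ((1u << 20) - 1)	/* full 20-bit PASID space */
#define DEMO_POOL_SIZE     64u			/* tiny pool for the demo */

static bool demo_in_use[DEMO_POOL_SIZE];

static uint32_t demo_pasid_alloc(uint32_t min, uint32_t max)
{
	/* reject requests outside the supported range, as the
	 * removed allocator did */
	if (min < DEMO_PASID_MIN || max > DEMO_PASID_MAX)
		return DEMO_INVALID_PASID;

	for (uint32_t p = min; p <= max && p < DEMO_POOL_SIZE; p++) {
		if (!demo_in_use[p]) {
			demo_in_use[p] = true;
			return p;
		}
	}
	return DEMO_INVALID_PASID;
}

static void demo_pasid_free(uint32_t pasid, bool still_bound)
{
	/* mirrors the "Cannot free active IOASID" refusal */
	if (still_bound) {
		fprintf(stderr, "Cannot free active PASID %u\n", pasid);
		return;
	}
	if (pasid < DEMO_POOL_SIZE)
		demo_in_use[pasid] = false;
}

int main(void)
{
	uint32_t p = demo_pasid_alloc(DEMO_PASID_MIN, 32);

	printf("allocated PASID %u\n", p);
	demo_pasid_free(p, false);
	return 0;
}
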
@@ -2960,9 +2878,6 @@ static int __init init_dmars(void)
*/
for_each_active_iommu(iommu, drhd) {
iommu_flush_write_buffer(iommu);
-#ifdef CONFIG_INTEL_IOMMU_SVM
- register_pasid_allocator(iommu);
-#endif
iommu_set_root_entry(iommu);
}

@@ -184,7 +184,6 @@
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
#define ecap_slads(e) (((e) >> 45) & 0x1)
-#define ecap_vcs(e) (((e) >> 44) & 0x1)
#define ecap_smts(e) (((e) >> 43) & 0x1)
#define ecap_dit(e) (((e) >> 41) & 0x1)
#define ecap_pds(e) (((e) >> 42) & 0x1)
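
[Aside, not part of the patch: each surviving ecap_*() helper simply shifts the 64-bit extended capability register right by the feature's bit position and masks off one bit. A tiny standalone sketch follows; the sample register value is made up for illustration.]

/* ecap_demo.c - standalone sketch, not kernel code */
#include <stdio.h>
#include <stdint.h>

/* same shift-and-mask shape as the kernel helpers above */
#define demo_ecap_slads(e) (((e) >> 45) & 0x1)
#define demo_ecap_smts(e)  (((e) >> 43) & 0x1)

int main(void)
{
	/* invented ECAP value: SLADS (bit 45) and SMTS (bit 43) set */
	uint64_t ecap = (1ULL << 45) | (1ULL << 43);

	printf("slads=%u smts=%u\n",
	       (unsigned)demo_ecap_slads(ecap),
	       (unsigned)demo_ecap_smts(ecap));
	return 0;
}
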
@@ -210,9 +209,6 @@
#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */

-/* Virtual command interface capability */
-#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
-
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
@@ -307,8 +303,6 @@
#define DMA_PRS_PPR ((u32)1)
#define DMA_PRS_PRO ((u32)2)

-#define DMA_VCS_PAS ((u64)1)
-
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
@@ -560,7 +554,6 @@ struct intel_iommu {
u64 reg_size; /* size of hw register set */
u64 cap;
u64 ecap;
- u64 vccap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
@@ -583,7 +576,6 @@ struct intel_iommu {
unsigned char prq_name[16]; /* Name for PRQ interrupt */
unsigned long prq_seq_number;
struct completion prq_complete;
- struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];