@@ -3219,9 +3219,96 @@ static void arm_smmu_domain_nested_free(struct iommu_domain *domain)
kfree(container_of(domain, struct arm_smmu_nested_domain, domain));
}
+/*
+ * Convert, in place, the raw invalidation command into an internal format that
+ * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
+ * stored in CPU endian.
+ *
+ * Enforce the VMID on the command.
+ */
+static int
+arm_smmu_convert_user_cmd(struct arm_smmu_nested_domain *nested_domain,
+ struct iommu_hwpt_arm_smmuv3_invalidate *cmd)
+{
+ u16 vmid = nested_domain->s2_parent->s2_cfg.vmid;
+
+ cmd->cmd[0] = le64_to_cpu(cmd->cmd[0]);
+ cmd->cmd[1] = le64_to_cpu(cmd->cmd[1]);
+
+ switch (cmd->cmd[0] & CMDQ_0_OP) {
+ case CMDQ_OP_TLBI_NSNH_ALL:
+ /* Convert to NH_ALL */
+ cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
+ FIELD_PREP(CMDQ_TLBI_0_VMID, vmid);
+ cmd->cmd[1] = 0;
+ break;
+ case CMDQ_OP_TLBI_NH_VA:
+ case CMDQ_OP_TLBI_NH_VAA:
+ case CMDQ_OP_TLBI_NH_ALL:
+ case CMDQ_OP_TLBI_NH_ASID:
+ cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
+ cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vmid);
+ break;
+ default:
+ return -EIO;
+ }
+ return 0;
+}
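
For illustration, a hedged sketch (not part of the patch) of what this conversion does to a guest CMD_TLBI_NH_VA, using the existing CMDQ_* macros from arm-smmu-v3.h; the ASID/VMID values and the iova variable are made up:

	/* As built by the guest and decoded by le64_to_cpu() above */
	u64 cmd0 = CMDQ_OP_TLBI_NH_VA |
		   FIELD_PREP(CMDQ_TLBI_0_ASID, 5) |
		   FIELD_PREP(CMDQ_TLBI_0_VMID, 99); /* guest-chosen, untrusted */
	u64 cmd1 = (iova & CMDQ_TLBI_1_VA) | CMDQ_TLBI_1_LEAF;

	/*
	 * After arm_smmu_convert_user_cmd() with s2_cfg.vmid == 3, cmd0
	 * carries VMID 3 with the ASID preserved and cmd1 is untouched, so
	 * the invalidation cannot reach another VM's translations.
	 */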
+
+static int arm_smmu_cache_invalidate_user(struct iommu_domain *domain,
+ struct iommu_user_data_array *array)
+{
+ struct arm_smmu_nested_domain *nested_domain =
+ container_of(domain, struct arm_smmu_nested_domain, domain);
+ struct arm_smmu_device *smmu = nested_domain->s2_parent->smmu;
+ struct iommu_hwpt_arm_smmuv3_invalidate *last_batch;
+ struct iommu_hwpt_arm_smmuv3_invalidate *cmds;
+ struct iommu_hwpt_arm_smmuv3_invalidate *cur;
+ struct iommu_hwpt_arm_smmuv3_invalidate *end;
+ int ret;
+
+ cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
+ if (!cmds)
+ return -ENOMEM;
+ cur = cmds;
+ end = cmds + array->entry_num;
+
+ static_assert(sizeof(*cmds) == 2 * sizeof(u64));
+ ret = iommu_copy_struct_from_full_user_array(
+ cmds, sizeof(*cmds), array,
+ IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3);
+ if (ret)
+ goto out;
+
+ last_batch = cmds;
+ while (cur != end) {
+ ret = arm_smmu_convert_user_cmd(nested_domain, cur);
+ if (ret)
+ goto out;
+
+ /* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
+ cur++;
+ if (cur != end && (cur - last_batch) != CMDQ_BATCH_ENTRIES - 1)
+ continue;
+
+ ret = arm_smmu_cmdq_issue_cmdlist(smmu, last_batch->cmd,
+ cur - last_batch, true);
+ if (ret) {
+ cur--;
+ goto out;
+ }
+ last_batch = cur;
+ }
+out:
+ array->entry_num = cur - cmds;
+ kfree(cmds);
+ return ret;
+}
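
As a worked example of the batching above (illustrative; CMDQ_BATCH_ENTRIES is BITS_PER_LONG, so 64 on arm64): with 130 user entries the loop issues entries [0, 62] once cur - last_batch reaches CMDQ_BATCH_ENTRIES - 1, entries [63, 125] at the next boundary, and entries [126, 129] when cur == end. If arm_smmu_cmdq_issue_cmdlist() fails, cur is stepped back before the unwind so array->entry_num reports only the commands actually consumed, letting userspace retry from the first unconsumed entry.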
+
static const struct iommu_domain_ops arm_smmu_nested_ops = {
.attach_dev = arm_smmu_attach_dev_nested,
.free = arm_smmu_domain_nested_free,
+ .cache_invalidate_user = arm_smmu_cache_invalidate_user,
};
static struct iommu_domain *
@@ -3249,6 +3336,14 @@ arm_smmu_domain_alloc_nesting(struct device *dev, u32 flags,
!(master->smmu->features & ARM_SMMU_FEAT_S2FWB))
return ERR_PTR(-EOPNOTSUPP);
+	/*
+	 * FORCE_SYNC is never set together with FEAT_NESTING. Removing this
+	 * restriction would require studying the exact HW defect to determine
+	 * whether arm_smmu_cache_invalidate_user() needs any change.
+	 */
+ if (WARN_ON(master->smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
+ return ERR_PTR(-EOPNOTSUPP);
+
ret = iommu_copy_struct_from_user(&arg, user_data,
IOMMU_HWPT_DATA_ARM_SMMUV3, ste);
if (ret)
@@ -521,6 +521,7 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_TLBI_NH_ALL 0x10
#define CMDQ_OP_TLBI_NH_ASID 0x11
#define CMDQ_OP_TLBI_NH_VA 0x12
+ #define CMDQ_OP_TLBI_NH_VAA 0x13
#define CMDQ_OP_TLBI_EL2_ALL 0x20
#define CMDQ_OP_TLBI_EL2_ASID 0x21
#define CMDQ_OP_TLBI_EL2_VA 0x22
@@ -491,7 +491,9 @@ static inline int __iommu_copy_struct_from_user_array(
* @index: Index to the location in the array to copy user data from
* @min_last: The last member of the data structure @kdst points in the
* initial version.
- * Return 0 for success, otherwise -error.
+ *
+ * Copy a single entry from a user array. Return 0 for success, otherwise
+ * -error.
*/
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
min_last) \
@@ -499,6 +501,51 @@ static inline int __iommu_copy_struct_from_user_array(
kdst, user_array, data_type, index, sizeof(*(kdst)), \
offsetofend(typeof(*(kdst)), min_last))
+
+/**
+ * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
+ * space data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @kdst_entry_size: sizeof(*kdst)
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ *
+ * Copy the entire user array. @kdst must have room for @kdst_entry_size *
+ * @user_array->entry_num bytes. Return 0 for success, otherwise -error.
+ */
+static inline int
+iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
+ struct iommu_user_data_array *user_array,
+ unsigned int data_type)
+{
+ unsigned int i;
+ int ret;
+
+ if (user_array->type != data_type)
+ return -EINVAL;
+ if (!user_array->entry_num)
+ return -EINVAL;
+	/* Fast path: the entry layouts match, bulk-copy the whole array */
+	if (likely(user_array->entry_len == kdst_entry_size)) {
+		if (copy_from_user(kdst, user_array->uptr,
+				   user_array->entry_num *
+					   user_array->entry_len))
+			return -EFAULT;
+		return 0;
+	}
+
+	/* Sizes differ: copy item by item via copy_struct_from_user() */
+ for (i = 0; i != user_array->entry_num; i++) {
+ ret = copy_struct_from_user(
+ kdst + kdst_entry_size * i, kdst_entry_size,
+ user_array->uptr + user_array->entry_len * i,
+ user_array->entry_len);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
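
A note on the fallback path, since it carries the uAPI compatibility rules: copy_struct_from_user() zero-fills the tail of each kernel entry when userspace passes a shorter entry_len (older userspace), and accepts a longer entry_len (newer userspace) only when the extra trailing bytes are zero. A hypothetical mismatch looks like:

	/* Hypothetical sizes: kernel struct is 16 bytes, user entries are 24 */
	ret = copy_struct_from_user(kdst + 16 * i, 16,
				    user_array->uptr + 24 * i, 24);
	/* fails with -E2BIG unless bytes 16..23 of the user entry are zero */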
+
/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
@@ -682,9 +682,11 @@ struct iommu_hwpt_get_dirty_bitmap {
* enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
* Data Type
* @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ * @IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
*/
enum iommu_hwpt_invalidate_data_type {
IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
+ IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3 = 1,
};
/**
@@ -723,6 +725,28 @@ struct iommu_hwpt_vtd_s1_invalidate {
__u32 __reserved;
};
+/**
+ * struct iommu_hwpt_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
+ * (IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3)
+ * @cmd: 128-bit cache invalidation command that runs in the SMMU CMDQ.
+ * Must be little-endian.
+ *
+ * Supported command list:
+ * CMDQ_OP_TLBI_NSNH_ALL
+ * CMDQ_OP_TLBI_NH_VA
+ * CMDQ_OP_TLBI_NH_VAA
+ * CMDQ_OP_TLBI_NH_ALL
+ * CMDQ_OP_TLBI_NH_ASID
+ *
+ * This API does not support ATS invalidation. Userspace must not request EATS
+ * in the vSTE or advertise ATS support in the IDR.
+ *
+ * -EIO will be returned if the command is not supported.
+ */
+struct iommu_hwpt_arm_smmuv3_invalidate {
+ __aligned_u64 cmd[2];
+};
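
For reference, a hedged userspace sketch of exercising this uAPI; iommufd (the /dev/iommu fd) and hwpt_id (a nested HWPT from IOMMU_HWPT_ALLOC) are assumed to come from prior setup, and the opcodes/field positions follow the defines above and the SMMUv3 spec:

	#include <stdio.h>
	#include <stdint.h>
	#include <endian.h>
	#include <sys/ioctl.h>
	#include <linux/iommufd.h>

	static int invalidate_guest_tlb(int iommufd, uint32_t hwpt_id)
	{
		struct iommu_hwpt_arm_smmuv3_invalidate inv[2] = {};
		struct iommu_hwpt_invalidate req = {
			.size = sizeof(req),
			.hwpt_id = hwpt_id,
			.data_uptr = (uintptr_t)inv,
			.data_type = IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3,
			.entry_len = sizeof(inv[0]),
			.entry_num = 2,
		};

		/* CMD_TLBI_NH_ASID (0x11), ASID 5 in bits [63:48]; the VMID
		 * field is ignored and overwritten by the kernel */
		inv[0].cmd[0] = htole64(0x11ULL | (5ULL << 48));
		/* CMD_TLBI_NSNH_ALL (0x30), converted to NH_ALL + S2 VMID */
		inv[1].cmd[0] = htole64(0x30ULL);

		if (ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &req)) {
			/* entry_num is updated to the count of consumed commands */
			fprintf(stderr, "invalidate failed after %u cmds\n",
				req.entry_num);
			return -1;
		}
		return 0;
	}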
+
/**
* struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
* @size: sizeof(struct iommu_hwpt_invalidate)