@@ -2432,12 +2432,14 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
static struct iommu_domain *
amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
+ unsigned long persistent_id,
const struct iommu_user_data *user_data)
{
unsigned int type = IOMMU_DOMAIN_UNMANAGED;
- if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
+ if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data ||
+ persistent_id)
return ERR_PTR(-EOPNOTSUPP);
return do_iommu_domain_alloc(type, dev, flags);
@@ -3049,6 +3049,7 @@ static struct iommu_domain arm_smmu_blocked_domain = {
static struct iommu_domain *
arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
+ unsigned long persistent_id,
const struct iommu_user_data *user_data)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
@@ -3058,7 +3059,7 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
if (flags & ~PAGING_FLAGS)
return ERR_PTR(-EOPNOTSUPP);
- if (parent || user_data)
+ if (parent || user_data || persistent_id)
return ERR_PTR(-EOPNOTSUPP);
smmu_domain = arm_smmu_domain_alloc();
@@ -3729,6 +3729,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
static struct iommu_domain *
intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
+ unsigned long persistent_id,
const struct iommu_user_data *user_data)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -3761,6 +3762,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
domain->type = IOMMU_DOMAIN_UNMANAGED;
domain->owner = &intel_iommu_ops;
domain->ops = intel_iommu_ops.default_domain_ops;
+ domain->persistent_id = persistent_id;
if (nested_parent) {
dmar_domain->nested_parent = true;
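
Recording the ID on the domain is what a post-kexec restore path can key on when matching a freshly allocated domain against state preserved via KHO. Below is a minimal sketch of such a lookup; kho_domain_state and kho_find_domain_state are illustrative names assumed for this example, not part of this series:

#include <linux/list.h>

/* Illustrative only: per-domain state preserved across kexec via KHO. */
struct kho_domain_state {
	unsigned long persistent_id;
	/* e.g. preserved page-table root, domain flags, ... */
	struct list_head node;
};

static struct kho_domain_state *
kho_find_domain_state(struct list_head *saved, unsigned long persistent_id)
{
	struct kho_domain_state *state;

	/* A zero ID means the domain was never persistent. */
	if (!persistent_id)
		return NULL;

	list_for_each_entry(state, saved, node)
		if (state->persistent_id == persistent_id)
			return state;
	return NULL;
}
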
@@ -137,6 +137,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if (ops->domain_alloc_user) {
hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
+ ictx->persistent_id,
user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
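
One thing this hunk leaves implicit: when a driver does not implement domain_alloc_user at all, the fallback allocation path has no way to honor a persistence request. A defensive check along these lines (purely a sketch, not in this patch) would turn that into an explicit error rather than a silently non-persistent domain:

	/* Hypothetical, not in this patch: a driver without
	 * domain_alloc_user cannot honor persistence, so fail a
	 * nonzero ID instead of silently dropping it. */
	if (!ops->domain_alloc_user && ictx->persistent_id)
		return ERR_PTR(-EOPNOTSUPP);
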
@@ -239,7 +240,9 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
hwpt->domain = ops->domain_alloc_user(idev->dev,
flags & ~IOMMU_HWPT_FAULT_ID_VALID,
- parent->common.domain, user_data);
+ parent->common.domain,
+ ictx->persistent_id,
+ user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
@@ -318,6 +318,7 @@ __mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
static struct iommu_domain *
mock_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
+ unsigned long persistent_id,
const struct iommu_user_data *user_data)
{
struct mock_iommu_domain *mock_parent;
@@ -215,6 +215,11 @@ struct iommu_domain {
struct iommu_dma_cookie *iova_cookie;
int (*iopf_handler)(struct iopf_group *group);
void *fault_data;
+ /*
+ * ID used to persist and restore this domain across kexec
+ * via KHO (Kexec HandOver); 0 means non-persistent.
+ */
+ unsigned long persistent_id;
union {
struct {
iommu_fault_handler_t handler;
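
With the field in place, any holder of an iommu_domain can test for persistence directly. A trivial illustrative helper (not part of this patch) makes the 0-means-non-persistent convention explicit:

/* Illustrative helper, not part of this patch; assumes linux/iommu.h. */
static inline bool iommu_domain_is_persistent(const struct iommu_domain *domain)
{
	return domain->persistent_id != 0;
}
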
@@ -518,7 +523,9 @@ static inline int __iommu_copy_struct_from_user_array(
* IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
* NULL while the @user_data can be optionally provided, the
* new domain must support __IOMMU_DOMAIN_PAGING.
- * Upon failure, ERR_PTR must be returned.
+ * Upon failure, ERR_PTR must be returned. @persistent_id is
+ * used to save/restore the domain across kexec via KHO;
+ * 0 indicates a non-persistent domain.
* @domain_alloc_paging: Allocate an iommu_domain that can be used for
* UNMANAGED, DMA, and DMA_FQ domain types.
* @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
@@ -564,7 +571,7 @@ struct iommu_ops {
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
struct iommu_domain *(*domain_alloc_user)(
struct device *dev, u32 flags, struct iommu_domain *parent,
- const struct iommu_user_data *user_data);
+ unsigned long persistent_id, const struct iommu_user_data *user_data);
struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
struct mm_struct *mm);