@@ -899,11 +899,8 @@ static void enable_iommu(struct amd_iommu *iommu)
spin_lock_irqsave(&iommu->lock, flags);
- if ( iommu->enabled )
- {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return;
- }
+ if ( unlikely(iommu->enabled) )
+ goto out;
amd_iommu_erratum_746_workaround(iommu);
@@ -957,6 +954,8 @@ static void enable_iommu(struct amd_iommu *iommu)
amd_iommu_flush_all_caches(iommu);
iommu->enabled = 1;
+
+ out:
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -966,11 +965,8 @@ static void disable_iommu(struct amd_iommu *iommu)
spin_lock_irqsave(&iommu->lock, flags);
- if ( !iommu->enabled )
- {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return;
- }
+ if ( unlikely(!iommu->enabled) )
+ goto out;
if ( !iommu->ctrl.int_cap_xt_en )
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
@@ -988,6 +984,7 @@ static void disable_iommu(struct amd_iommu *iommu)
iommu->enabled = 0;
+ out:
spin_unlock_irqrestore(&iommu->lock, flags);
}
... to avoid having multiple spin_unlock_irqrestore() calls. Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> --- CC: Jan Beulich <JBeulich@suse.com> CC: Wei Liu <wl@xen.org> CC: Roger Pau Monné <roger.pau@citrix.com> CC: Boris Ostrovsky <boris.ostrovsky@oracle.com> CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> CC: Brian Woods <brian.woods@amd.com> Interestingly GCC 6.3 managed to fold disable_iommu() automatically. There is some partial folding for enable_iommu() (insofar as there is only a single call to _spin_unlock_irqrestore emitted), but this delta yields add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-20 (-20) Function old new delta enable_iommu 1844 1824 -20 Total: Before=3340299, After=3340279, chg -0.00% which means that something wasn't done automatically. Noticed while investigating the S3 regression. --- xen/drivers/passthrough/amd/iommu_init.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-)