diff mbox series

[8/9] VT-d: drop/move a few QI related constants

Message ID 1b8558d4-42cc-bd68-e6c8-138f40f81e1c@suse.com (mailing list archive)
State New
Headers show
Series IOMMU: XSA-373 follow-on | expand

Commit Message

Jan Beulich June 9, 2021, 9:29 a.m. UTC
Replace uses of QINVAL_ENTRY_ORDER and QINVAL_INDEX_SHIFT, such that
the constants can be dropped. Move the remaining QINVAL_* ones to the
single source file using them.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

Comments

Tian, Kevin June 24, 2021, 5:32 a.m. UTC | #1
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Wednesday, June 9, 2021 5:30 PM
> 
> Replace uses of QINVAL_ENTRY_ORDER and QINVAL_INDEX_SHIFT, such that
> the constants can be dropped. Move the remaining QINVAL_* ones to the
> single source file using them.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> 
> --- a/xen/drivers/passthrough/vtd/iommu.h
> +++ b/xen/drivers/passthrough/vtd/iommu.h
> @@ -451,17 +451,6 @@ struct qinval_entry {
>      }q;
>  };
> 
> -/* Each entry is 16 bytes, so 2^8 entries per page */
> -#define QINVAL_ENTRY_ORDER  ( PAGE_SHIFT - 4 )
> -#define QINVAL_MAX_ENTRY_NR (1u << (7 + QINVAL_ENTRY_ORDER))
> -
> -/* Status data flag */
> -#define QINVAL_STAT_INIT  0
> -#define QINVAL_STAT_DONE  1
> -
> -/* Queue invalidation head/tail shift */
> -#define QINVAL_INDEX_SHIFT 4
> -
>  #define TYPE_INVAL_CONTEXT      0x1
>  #define TYPE_INVAL_IOTLB        0x2
>  #define TYPE_INVAL_DEVICE_IOTLB 0x3
> --- a/xen/drivers/passthrough/vtd/qinval.c
> +++ b/xen/drivers/passthrough/vtd/qinval.c
> @@ -29,6 +29,13 @@
>  #include "extern.h"
>  #include "../ats.h"
> 
> +/* Each entry is 16 bytes, and there can be up to 2^7 pages. */
> +#define QINVAL_MAX_ENTRY_NR (1u << (7 + PAGE_SHIFT_4K - 4))
> +
> +/* Status data flag */
> +#define QINVAL_STAT_INIT  0
> +#define QINVAL_STAT_DONE  1
> +
>  static unsigned int __read_mostly qi_pg_order;
>  static unsigned int __read_mostly qi_entry_nr;
> 
> @@ -45,11 +52,11 @@ static unsigned int qinval_next_index(st
>  {
>      unsigned int tail = dmar_readl(iommu->reg, DMAR_IQT_REG);
> 
> -    tail >>= QINVAL_INDEX_SHIFT;
> +    tail /= sizeof(struct qinval_entry);
> 
>      /* (tail+1 == head) indicates a full queue, wait for HW */
>      while ( ((tail + 1) & (qi_entry_nr - 1)) ==
> -            (dmar_readl(iommu->reg, DMAR_IQH_REG) >> QINVAL_INDEX_SHIFT) )
> +            (dmar_readl(iommu->reg, DMAR_IQH_REG) / sizeof(struct qinval_entry)) )
>      {
>          printk_once(XENLOG_ERR VTDPREFIX " IOMMU#%u: no QI slot available\n",
>                      iommu->index);
> @@ -66,7 +73,7 @@ static void qinval_update_qtail(struct v
>      /* Need hold register lock when update tail */
>      ASSERT( spin_is_locked(&iommu->register_lock) );
>      val = (index + 1) & (qi_entry_nr - 1);
> -    dmar_writel(iommu->reg, DMAR_IQT_REG, val << QINVAL_INDEX_SHIFT);
> +    dmar_writel(iommu->reg, DMAR_IQT_REG, val * sizeof(struct qinval_entry));
>  }
> 
>  static struct qinval_entry *qi_map_entry(const struct vtd_iommu *iommu,
> @@ -413,17 +420,18 @@ int enable_qinval(struct vtd_iommu *iomm
>               * only one entry left.
>               */
>              BUILD_BUG_ON(CONFIG_NR_CPUS * 2 >= QINVAL_MAX_ENTRY_NR);
> -            qi_pg_order = get_order_from_bytes((num_present_cpus() * 2 + 1) <<
> -                                               (PAGE_SHIFT -
> -                                                QINVAL_ENTRY_ORDER));
> -            qi_entry_nr = 1u << (qi_pg_order + QINVAL_ENTRY_ORDER);
> +            qi_pg_order = get_order_from_bytes((num_present_cpus() * 2 + 1) *
> +                                               sizeof(struct qinval_entry));
> +            qi_entry_nr = (PAGE_SIZE << qi_pg_order) /
> +                          sizeof(struct qinval_entry);
> 
>              dprintk(XENLOG_INFO VTDPREFIX,
>                      "QI: using %u-entry ring(s)\n", qi_entry_nr);
>          }
> 
>          iommu->qinval_maddr =
> -            alloc_pgtable_maddr(qi_entry_nr >> QINVAL_ENTRY_ORDER,
> +            alloc_pgtable_maddr(PFN_DOWN(qi_entry_nr *
> +                                         sizeof(struct qinval_entry)),
>                                  iommu->node);
>          if ( iommu->qinval_maddr == 0 )
>          {
diff mbox series

Patch

--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -451,17 +451,6 @@  struct qinval_entry {
     }q;
 };
 
-/* Each entry is 16 bytes, so 2^8 entries per page */
-#define QINVAL_ENTRY_ORDER  ( PAGE_SHIFT - 4 )
-#define QINVAL_MAX_ENTRY_NR (1u << (7 + QINVAL_ENTRY_ORDER))
-
-/* Status data flag */
-#define QINVAL_STAT_INIT  0
-#define QINVAL_STAT_DONE  1
-
-/* Queue invalidation head/tail shift */
-#define QINVAL_INDEX_SHIFT 4
-
 #define TYPE_INVAL_CONTEXT      0x1
 #define TYPE_INVAL_IOTLB        0x2
 #define TYPE_INVAL_DEVICE_IOTLB 0x3
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -29,6 +29,13 @@ 
 #include "extern.h"
 #include "../ats.h"
 
+/* Each entry is 16 bytes, and there can be up to 2^7 pages. */
+#define QINVAL_MAX_ENTRY_NR (1u << (7 + PAGE_SHIFT_4K - 4))
+
+/* Status data flag */
+#define QINVAL_STAT_INIT  0
+#define QINVAL_STAT_DONE  1
+
 static unsigned int __read_mostly qi_pg_order;
 static unsigned int __read_mostly qi_entry_nr;
 
@@ -45,11 +52,11 @@  static unsigned int qinval_next_index(st
 {
     unsigned int tail = dmar_readl(iommu->reg, DMAR_IQT_REG);
 
-    tail >>= QINVAL_INDEX_SHIFT;
+    tail /= sizeof(struct qinval_entry);
 
     /* (tail+1 == head) indicates a full queue, wait for HW */
     while ( ((tail + 1) & (qi_entry_nr - 1)) ==
-            (dmar_readl(iommu->reg, DMAR_IQH_REG) >> QINVAL_INDEX_SHIFT) )
+            (dmar_readl(iommu->reg, DMAR_IQH_REG) / sizeof(struct qinval_entry)) )
     {
         printk_once(XENLOG_ERR VTDPREFIX " IOMMU#%u: no QI slot available\n",
                     iommu->index);
@@ -66,7 +73,7 @@  static void qinval_update_qtail(struct v
     /* Need hold register lock when update tail */
     ASSERT( spin_is_locked(&iommu->register_lock) );
     val = (index + 1) & (qi_entry_nr - 1);
-    dmar_writel(iommu->reg, DMAR_IQT_REG, val << QINVAL_INDEX_SHIFT);
+    dmar_writel(iommu->reg, DMAR_IQT_REG, val * sizeof(struct qinval_entry));
 }
 
 static struct qinval_entry *qi_map_entry(const struct vtd_iommu *iommu,
@@ -413,17 +420,18 @@  int enable_qinval(struct vtd_iommu *iomm
              * only one entry left.
              */
             BUILD_BUG_ON(CONFIG_NR_CPUS * 2 >= QINVAL_MAX_ENTRY_NR);
-            qi_pg_order = get_order_from_bytes((num_present_cpus() * 2 + 1) <<
-                                               (PAGE_SHIFT -
-                                                QINVAL_ENTRY_ORDER));
-            qi_entry_nr = 1u << (qi_pg_order + QINVAL_ENTRY_ORDER);
+            qi_pg_order = get_order_from_bytes((num_present_cpus() * 2 + 1) *
+                                               sizeof(struct qinval_entry));
+            qi_entry_nr = (PAGE_SIZE << qi_pg_order) /
+                          sizeof(struct qinval_entry);
 
             dprintk(XENLOG_INFO VTDPREFIX,
                     "QI: using %u-entry ring(s)\n", qi_entry_nr);
         }
 
         iommu->qinval_maddr =
-            alloc_pgtable_maddr(qi_entry_nr >> QINVAL_ENTRY_ORDER,
+            alloc_pgtable_maddr(PFN_DOWN(qi_entry_nr *
+                                         sizeof(struct qinval_entry)),
                                 iommu->node);
         if ( iommu->qinval_maddr == 0 )
         {