Message ID | 20170327101823.99368-4-roger.pau@citrix.com (mailing list archive) |
---|---|
State | New, archived |
>>> On 27.03.17 at 12:18, <roger.pau@citrix.com> wrote: > Rearrange the fields of hvm_irq so that gsi_assert_count can be converted into > a variable size array and add a new field to account the number of GSIs. > --- a/xen/arch/x86/hvm/hvm.c > +++ b/xen/arch/x86/hvm/hvm.c > @@ -457,7 +457,7 @@ void hvm_migrate_pirqs(struct vcpu *v) > { > struct domain *d = v->domain; > > - if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci ) > + if ( !iommu_enabled || !d->arch.hvm_domain.irq->dpci ) If the prior patch is really needed - why did it not convert this? (There are more such instances further down.) > @@ -122,7 +124,7 @@ void hvm_isa_irq_assert( > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > > - ASSERT(isa_irq <= 15); > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); > > spin_lock(&d->arch.hvm_domain.irq_lock); > > @@ -139,7 +141,7 @@ void hvm_isa_irq_deassert( > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > > - ASSERT(isa_irq <= 15); > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); Not sure about these two - perhaps they'd better be BUILD_BUG_ON()s for the DomU side, and Dom0 should never be allocated less than 16? > @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) > (uint32_t) hvm_irq->isa_irq.pad[0], > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", > i, i+7, > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) > hvm_irq->gsi_assert_count[i+5], > hvm_irq->gsi_assert_count[i+6], > hvm_irq->gsi_assert_count[i+7]); > + if ( i != hvm_irq->nr_gsis ) > + { > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); > + for ( ; i < hvm_irq->nr_gsis; i++) > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); > + printk("\n"); > + } I realize you've just copied this from existing code, but what advantage does %2.2 have over just %2 here? > @@ -545,6 +554,9 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h) > unsigned int asserted, pdev, pintx; > int rc; > > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > + return -EOPNOTSUPP; > + > spin_lock(&d->arch.hvm_domain.irq_lock); > > pdev = hvm_irq->callback_via.pci.dev; > @@ -592,6 +604,9 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > int link, dev, intx, gsi; > > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > + return -EOPNOTSUPP; Why do you add these checks here? > --- a/xen/include/asm-x86/domain.h > +++ b/xen/include/asm-x86/domain.h > @@ -17,7 +17,7 @@ > #define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain)) > > #define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \ > - d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector) > + d->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector) Please take the opportunity and properly parenthesize the use of d here. > @@ -89,13 +77,27 @@ struct hvm_irq { > u8 round_robin_prev_vcpu; > > struct hvm_irq_dpci *dpci; > + > + /* > + * Number of wires asserting each GSI. > + * > + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space > + * except ISA IRQ 0, which is connected to GSI 2. 
> + * PCI links map into this space via the PCI-ISA bridge. > + * > + * GSIs 16+ are used only be PCI devices. The mapping from PCI device to > + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 > + */ > + unsigned int nr_gsis; > + u8 gsi_assert_count[]; > }; I'm afraid at the same time (or in a later patch) the type of this array (and maybe also the type of pci_link_assert_count[]) will need to be widened. Jan
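For readers following the patch itself: the core of the change is turning `gsi_assert_count` into a C99 flexible array member and sizing the allocation with `offsetof()`, which is what the new `hvm_irq_size()` macro does. Below is a minimal, generic sketch of that technique (plain C for illustration, not the Xen code; `irq_state` and `irq_state_alloc` are invented names, and the `offsetof()`-on-an-array-element form matches what the patch relies on with GCC/Clang):

```c
#include <stddef.h>
#include <stdlib.h>

struct irq_state {
    unsigned int nr_gsis;          /* number of valid entries below */
    unsigned char assert_count[];  /* flexible array member, sized at run time */
};

/* Bytes needed for 'cnt' GSIs -- the same trick as hvm_irq_size(). */
#define irq_state_size(cnt) offsetof(struct irq_state, assert_count[cnt])

static struct irq_state *irq_state_alloc(unsigned int cnt)
{
    /* calloc() stands in for Xen's xzalloc_bytes(): a zeroed allocation. */
    struct irq_state *s = calloc(1, irq_state_size(cnt));

    if ( s )
        s->nr_gsis = cnt;
    return s;
}
```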
On Mon, Mar 27, 2017 at 09:59:42AM -0600, Jan Beulich wrote: > >>> On 27.03.17 at 12:18, <roger.pau@citrix.com> wrote: > > --- a/xen/arch/x86/hvm/hvm.c > > +++ b/xen/arch/x86/hvm/hvm.c > > @@ -457,7 +457,7 @@ void hvm_migrate_pirqs(struct vcpu *v) > > { > > struct domain *d = v->domain; > > > > - if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci ) > > + if ( !iommu_enabled || !d->arch.hvm_domain.irq->dpci ) > > If the prior patch is really needed - why did it not convert this? > (There are more such instances further down.) It's now done. > > @@ -122,7 +124,7 @@ void hvm_isa_irq_assert( > > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > > > > - ASSERT(isa_irq <= 15); > > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); > > > > spin_lock(&d->arch.hvm_domain.irq_lock); > > > > @@ -139,7 +141,7 @@ void hvm_isa_irq_deassert( > > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > > > > - ASSERT(isa_irq <= 15); > > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); > > Not sure about these two - perhaps they'd better be > BUILD_BUG_ON()s for the DomU side, and Dom0 should never > be allocated less than 16? Hm, I could add: BUILD_BUG_ON(VIOAPIC_NUM_PINS < 16); But this is going to make it harder to remove VIOAPIC_NUM_PINS later on. > > @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) > > (uint32_t) hvm_irq->isa_irq.pad[0], > > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], > > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); > > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) > > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) > > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 > > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", > > i, i+7, > > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) > > hvm_irq->gsi_assert_count[i+5], > > hvm_irq->gsi_assert_count[i+6], > > hvm_irq->gsi_assert_count[i+7]); > > + if ( i != hvm_irq->nr_gsis ) > > + { > > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); > > + for ( ; i < hvm_irq->nr_gsis; i++) > > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); > > + printk("\n"); > > + } > > I realize you've just copied this from existing code, but what > advantage does %2.2 have over just %2 here? Shouldn't it be just %3 anyway? Would you be fine with me fixing this here, or would you rather prefer a separate patch? > > @@ -545,6 +554,9 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h) > > unsigned int asserted, pdev, pintx; > > int rc; > > > > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > > + return -EOPNOTSUPP; > > + > > spin_lock(&d->arch.hvm_domain.irq_lock); > > > > pdev = hvm_irq->callback_via.pci.dev; > > @@ -592,6 +604,9 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) > > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > > int link, dev, intx, gsi; > > > > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > > + return -EOPNOTSUPP; > > Why do you add these checks here? Because there's a: for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ ) hvm_irq->gsi_assert_count[gsi] = 0; A little bit below. I can change that to use nr_gsis instead and remove those checks (both here and in irq_save_pci). 
> > --- a/xen/include/asm-x86/domain.h > > +++ b/xen/include/asm-x86/domain.h > > @@ -17,7 +17,7 @@ > > #define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain)) > > > > #define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \ > > - d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector) > > + d->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector) > > Please take the opportunity and properly parenthesize the use of > d here. Done. > > @@ -89,13 +77,27 @@ struct hvm_irq { > > u8 round_robin_prev_vcpu; > > > > struct hvm_irq_dpci *dpci; > > + > > + /* > > + * Number of wires asserting each GSI. > > + * > > + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space > > + * except ISA IRQ 0, which is connected to GSI 2. > > + * PCI links map into this space via the PCI-ISA bridge. > > + * > > + * GSIs 16+ are used only be PCI devices. The mapping from PCI device to > > + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 > > + */ > > + unsigned int nr_gsis; > > + u8 gsi_assert_count[]; > > }; > > I'm afraid at the same time (or in a later patch) the type of this array > (and maybe also the type of pci_link_assert_count[]) will need to be > widened. This one seems to be fine for PVH Dom0 (you will have to look at the next series), but I specifically avoid touching pci_link_assert_count. Again, better to comment this on the next patch series that actually implements the interrupt binding. This is needed because code in vioapic makes use of gsi_assert_count. Roger.
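The adjustment Roger offers here (and confirms as done later in the thread) would make `irq_load_pci()` clear the counters up to the dynamic bound, at which point the `-EOPNOTSUPP` checks can be dropped again. A sketch of the changed loop (a fragment in the context of the quoted function, so `hvm_irq` and `gsi` come from there; the exact form in the next revision may differ):

```c
    /* Clear the assert counts for however many GSIs this domain has,
     * rather than for the compile-time VIOAPIC_NUM_PINS. */
    for ( gsi = 0; gsi < hvm_irq->nr_gsis; gsi++ )
        hvm_irq->gsi_assert_count[gsi] = 0;
```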
>>> On 27.03.17 at 19:28, <roger.pau@citrix.com> wrote: > On Mon, Mar 27, 2017 at 09:59:42AM -0600, Jan Beulich wrote: >> >>> On 27.03.17 at 12:18, <roger.pau@citrix.com> wrote: >> > @@ -122,7 +124,7 @@ void hvm_isa_irq_assert( >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); >> > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); >> > >> > - ASSERT(isa_irq <= 15); >> > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); >> > >> > spin_lock(&d->arch.hvm_domain.irq_lock); >> > >> > @@ -139,7 +141,7 @@ void hvm_isa_irq_deassert( >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); >> > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); >> > >> > - ASSERT(isa_irq <= 15); >> > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); >> >> Not sure about these two - perhaps they'd better be >> BUILD_BUG_ON()s for the DomU side, and Dom0 should never >> be allocated less than 16? > > Hm, I could add: > > BUILD_BUG_ON(VIOAPIC_NUM_PINS < 16); > > But this is going to make it harder to remove VIOAPIC_NUM_PINS later on. As said elsewhere, those remaining uses could/should become ARRAY_SIZE(). >> > @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) >> > (uint32_t) hvm_irq->isa_irq.pad[0], >> > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], >> > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); >> > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) >> > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) >> > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 >> > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", >> > i, i+7, >> > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) >> > hvm_irq->gsi_assert_count[i+5], >> > hvm_irq->gsi_assert_count[i+6], >> > hvm_irq->gsi_assert_count[i+7]); >> > + if ( i != hvm_irq->nr_gsis ) >> > + { >> > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); >> > + for ( ; i < hvm_irq->nr_gsis; i++) >> > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); >> > + printk("\n"); >> > + } >> >> I realize you've just copied this from existing code, but what >> advantage does %2.2 have over just %2 here? > > Shouldn't it be just %3 anyway? Would you be fine with me fixing this here, or > would you rather prefer a separate patch? Well, if you also want to change the existing ones, then a separate patch would be preferred. As to %2 vs %3 - how would the latter be any better if we want/need to change the type from u8 anyway? >> > @@ -545,6 +554,9 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h) >> > unsigned int asserted, pdev, pintx; >> > int rc; >> > >> > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) >> > + return -EOPNOTSUPP; >> > + >> > spin_lock(&d->arch.hvm_domain.irq_lock); >> > >> > pdev = hvm_irq->callback_via.pci.dev; >> > @@ -592,6 +604,9 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); >> > int link, dev, intx, gsi; >> > >> > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) >> > + return -EOPNOTSUPP; >> >> Why do you add these checks here? > > Because there's a: > > for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ ) > hvm_irq->gsi_assert_count[gsi] = 0; > > A little bit below. True for the load side, but I can't see anything like that for save. > I can change that to use nr_gsis instead and remove those > checks (both here and in irq_save_pci). Please do, there's no harm using the dynamic upper bound in that case. 
>> > @@ -89,13 +77,27 @@ struct hvm_irq { >> > u8 round_robin_prev_vcpu; >> > >> > struct hvm_irq_dpci *dpci; >> > + >> > + /* >> > + * Number of wires asserting each GSI. >> > + * >> > + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space >> > + * except ISA IRQ 0, which is connected to GSI 2. >> > + * PCI links map into this space via the PCI-ISA bridge. >> > + * >> > + * GSIs 16+ are used only be PCI devices. The mapping from PCI device to >> > + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 >> > + */ >> > + unsigned int nr_gsis; >> > + u8 gsi_assert_count[]; >> > }; >> >> I'm afraid at the same time (or in a later patch) the type of this array >> (and maybe also the type of pci_link_assert_count[]) will need to be >> widened. > > This one seems to be fine for PVH Dom0 (you will have to look at the next > series), but I specifically avoid touching pci_link_assert_count. Again, better > to comment this on the next patch series that actually implements the interrupt > binding. This is needed because code in vioapic makes use of > gsi_assert_count. Well, we can certainly discuss this there (whenever I get to look at it, as that's not 4.9 stuff now anymore), but for now I don't see the connection between my comment and your reply. Jan
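For context on the `%2.2` question: with the usual printf()-style semantics, the precision on an integer conversion sets a minimum number of digits (zero-padded), while the width pads with spaces, so `%2.2` and `%2` differ only in the padding character and neither leaves room for a three-digit value. A small standalone illustration (plain C, not Xen's printk):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint8_t small = 7, big = 255;

    printf("[%2.2" PRIu8 "] [%2" PRIu8 "] [%3" PRIu8 "]\n", small, small, small);
    /* prints: [07] [ 7] [  7] */
    printf("[%2.2" PRIu8 "] [%2" PRIu8 "] [%3" PRIu8 "]\n", big, big, big);
    /* prints: [255] [255] [255] -- the two-column formats overflow their
     * field width, while %3 just fits the full uint8_t range. */
    return 0;
}
```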
On Tue, Mar 28, 2017 at 01:25:44AM -0600, Jan Beulich wrote: > >>> On 27.03.17 at 19:28, <roger.pau@citrix.com> wrote: > > On Mon, Mar 27, 2017 at 09:59:42AM -0600, Jan Beulich wrote: > >> >>> On 27.03.17 at 12:18, <roger.pau@citrix.com> wrote: > >> > @@ -122,7 +124,7 @@ void hvm_isa_irq_assert( > >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > >> > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > >> > > >> > - ASSERT(isa_irq <= 15); > >> > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); > >> > > >> > spin_lock(&d->arch.hvm_domain.irq_lock); > >> > > >> > @@ -139,7 +141,7 @@ void hvm_isa_irq_deassert( > >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > >> > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); > >> > > >> > - ASSERT(isa_irq <= 15); > >> > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); > >> > >> Not sure about these two - perhaps they'd better be > >> BUILD_BUG_ON()s for the DomU side, and Dom0 should never > >> be allocated less than 16? > > > > Hm, I could add: > > > > BUILD_BUG_ON(VIOAPIC_NUM_PINS < 16); > > > > But this is going to make it harder to remove VIOAPIC_NUM_PINS later on. > > As said elsewhere, those remaining uses could/should become > ARRAY_SIZE(). Hm, but then I will have to use: ARRAY_SIZE(((struct hvm_hw_vioapic *)0)->redirtbl) Which is kind of cumbersome? I can define that into a macro I guess. > >> > @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) > >> > (uint32_t) hvm_irq->isa_irq.pad[0], > >> > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], > >> > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); > >> > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) > >> > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) > >> > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 > >> > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", > >> > i, i+7, > >> > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) > >> > hvm_irq->gsi_assert_count[i+5], > >> > hvm_irq->gsi_assert_count[i+6], > >> > hvm_irq->gsi_assert_count[i+7]); > >> > + if ( i != hvm_irq->nr_gsis ) > >> > + { > >> > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); > >> > + for ( ; i < hvm_irq->nr_gsis; i++) > >> > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); > >> > + printk("\n"); > >> > + } > >> > >> I realize you've just copied this from existing code, but what > >> advantage does %2.2 have over just %2 here? > > > > Shouldn't it be just %3 anyway? Would you be fine with me fixing this here, or > > would you rather prefer a separate patch? > > Well, if you also want to change the existing ones, then a separate > patch would be preferred. As to %2 vs %3 - how would the latter > be any better if we want/need to change the type from u8 anyway? ATM uint8_t can be 255, so %3 seems better (although AFAICT this is limited to 4, because there are 4 INT# lines to each GSI, and pending GSIs are not accumulated). I'm not sure I follow why do we want/need to change the type. 
> >> > @@ -545,6 +554,9 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h) > >> > unsigned int asserted, pdev, pintx; > >> > int rc; > >> > > >> > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > >> > + return -EOPNOTSUPP; > >> > + > >> > spin_lock(&d->arch.hvm_domain.irq_lock); > >> > > >> > pdev = hvm_irq->callback_via.pci.dev; > >> > @@ -592,6 +604,9 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) > >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); > >> > int link, dev, intx, gsi; > >> > > >> > + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) > >> > + return -EOPNOTSUPP; > >> > >> Why do you add these checks here? > > > > Because there's a: > > > > for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ ) > > hvm_irq->gsi_assert_count[gsi] = 0; > > > > A little bit below. > > True for the load side, but I can't see anything like that for save. > > > I can change that to use nr_gsis instead and remove those > > checks (both here and in irq_save_pci). > > Please do, there's no harm using the dynamic upper bound in > that case. Now done. > >> > @@ -89,13 +77,27 @@ struct hvm_irq { > >> > u8 round_robin_prev_vcpu; > >> > > >> > struct hvm_irq_dpci *dpci; > >> > + > >> > + /* > >> > + * Number of wires asserting each GSI. > >> > + * > >> > + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space > >> > + * except ISA IRQ 0, which is connected to GSI 2. > >> > + * PCI links map into this space via the PCI-ISA bridge. > >> > + * > >> > + * GSIs 16+ are used only be PCI devices. The mapping from PCI device to > >> > + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 > >> > + */ > >> > + unsigned int nr_gsis; > >> > + u8 gsi_assert_count[]; > >> > }; > >> > >> I'm afraid at the same time (or in a later patch) the type of this array > >> (and maybe also the type of pci_link_assert_count[]) will need to be > >> widened. > > > > This one seems to be fine for PVH Dom0 (you will have to look at the next > > series), but I specifically avoid touching pci_link_assert_count. Again, better > > to comment this on the next patch series that actually implements the interrupt > > binding. This is needed because code in vioapic makes use of > > gsi_assert_count. > > Well, we can certainly discuss this there (whenever I get to look at > it, as that's not 4.9 stuff now anymore), but for now I don't see the > connection between my comment and your reply. Hm, I'm not sure I understand why would you like to change the type of this array. AFAICT the type of the array is not related to the number of vIO APIC pins, or the number of vIO APICs. Roger.
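The cumbersome expression above can be tucked behind a helper built on Xen's existing `ARRAY_SIZE()`; something along these lines (the macro name is invented here, not an established Xen interface):

```c
/* Hypothetical helper: number of elements in an array member of 'type',
 * without needing an object of that type (nothing is dereferenced --
 * only sizeof/typeof are applied to the expression). */
#define MEMBER_ARRAY_SIZE(type, member) ARRAY_SIZE(((type *)NULL)->member)

/* So the check could read:
 *   BUILD_BUG_ON(MEMBER_ARRAY_SIZE(struct hvm_hw_vioapic, redirtbl) < 16);
 */
```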
>>> On 28.03.17 at 10:40, <roger.pau@citrix.com> wrote: > On Tue, Mar 28, 2017 at 01:25:44AM -0600, Jan Beulich wrote: >> >>> On 27.03.17 at 19:28, <roger.pau@citrix.com> wrote: >> > On Mon, Mar 27, 2017 at 09:59:42AM -0600, Jan Beulich wrote: >> >> >>> On 27.03.17 at 12:18, <roger.pau@citrix.com> wrote: >> >> > @@ -122,7 +124,7 @@ void hvm_isa_irq_assert( >> >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); >> >> > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); >> >> > >> >> > - ASSERT(isa_irq <= 15); >> >> > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); >> >> > >> >> > spin_lock(&d->arch.hvm_domain.irq_lock); >> >> > >> >> > @@ -139,7 +141,7 @@ void hvm_isa_irq_deassert( >> >> > struct hvm_irq *hvm_irq = hvm_domain_irq(d); >> >> > unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); >> >> > >> >> > - ASSERT(isa_irq <= 15); >> >> > + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); >> >> >> >> Not sure about these two - perhaps they'd better be >> >> BUILD_BUG_ON()s for the DomU side, and Dom0 should never >> >> be allocated less than 16? >> > >> > Hm, I could add: >> > >> > BUILD_BUG_ON(VIOAPIC_NUM_PINS < 16); >> > >> > But this is going to make it harder to remove VIOAPIC_NUM_PINS later on. >> >> As said elsewhere, those remaining uses could/should become >> ARRAY_SIZE(). > > Hm, but then I will have to use: > > ARRAY_SIZE(((struct hvm_hw_vioapic *)0)->redirtbl) > > Which is kind of cumbersome? I can define that into a macro I guess. Indeed, ugly. How about struct hvm_vioapic { struct domain *domain; union { DECLARE_VIOAPIC(, VIOAPIC_NUM_PINS); struct hvm_hw_vioapic domU; }; }; (with "domU" subject to improvement)? This might at once allow making the save/restore code look better. >> >> > @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) >> >> > (uint32_t) hvm_irq->isa_irq.pad[0], >> >> > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], >> >> > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); >> >> > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) >> >> > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) >> >> > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 >> >> > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", >> >> > i, i+7, >> >> > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) >> >> > hvm_irq->gsi_assert_count[i+5], >> >> > hvm_irq->gsi_assert_count[i+6], >> >> > hvm_irq->gsi_assert_count[i+7]); >> >> > + if ( i != hvm_irq->nr_gsis ) >> >> > + { >> >> > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); >> >> > + for ( ; i < hvm_irq->nr_gsis; i++) >> >> > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); >> >> > + printk("\n"); >> >> > + } >> >> >> >> I realize you've just copied this from existing code, but what >> >> advantage does %2.2 have over just %2 here? >> > >> > Shouldn't it be just %3 anyway? Would you be fine with me fixing this here, or >> > would you rather prefer a separate patch? >> >> Well, if you also want to change the existing ones, then a separate >> patch would be preferred. As to %2 vs %3 - how would the latter >> be any better if we want/need to change the type from u8 anyway? > > ATM uint8_t can be 255, so %3 seems better (although AFAICT this is limited to > 4, because there are 4 INT# lines to each GSI, and pending GSIs are not > accumulated). I'm not sure I follow why do we want/need to change the type. For our DomU model there are four lines. Physical machines often declare more in ACPI, and I'm not sure there's a theoretical upper limit. 
>> >> > @@ -89,13 +77,27 @@ struct hvm_irq { >> >> > u8 round_robin_prev_vcpu; >> >> > >> >> > struct hvm_irq_dpci *dpci; >> >> > + >> >> > + /* >> >> > + * Number of wires asserting each GSI. >> >> > + * >> >> > + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space >> >> > + * except ISA IRQ 0, which is connected to GSI 2. >> >> > + * PCI links map into this space via the PCI-ISA bridge. >> >> > + * >> >> > + * GSIs 16+ are used only be PCI devices. The mapping from PCI device to >> >> > + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 >> >> > + */ >> >> > + unsigned int nr_gsis; >> >> > + u8 gsi_assert_count[]; >> >> > }; >> >> >> >> I'm afraid at the same time (or in a later patch) the type of this array >> >> (and maybe also the type of pci_link_assert_count[]) will need to be >> >> widened. >> > >> > This one seems to be fine for PVH Dom0 (you will have to look at the next >> > series), but I specifically avoid touching pci_link_assert_count. Again, better >> > to comment this on the next patch series that actually implements the interrupt >> > binding. This is needed because code in vioapic makes use of >> > gsi_assert_count. >> >> Well, we can certainly discuss this there (whenever I get to look at >> it, as that's not 4.9 stuff now anymore), but for now I don't see the >> connection between my comment and your reply. > > Hm, I'm not sure I understand why would you like to change the type of this > array. AFAICT the type of the array is not related to the number of vIO APIC > pins, or the number of vIO APICs. See above - I think it is related to the number of declared INTx lines (of which a DomU at present would only have 4). Or maybe I'm mistaken where the INTx value comes from here. Jan
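To illustrate why the union Jan proposes could make save/restore neater: the `domU` member aliases the fixed-size layout the migration record expects, so a save handler can bound-check the pin count and hand that member to the save helper directly. A rough sketch under those assumptions (a `domain_vioapic()` accessor returning the structure above and an `nr_pins` field are assumed here; neither is settled by this thread):

```c
static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_vioapic *s = domain_vioapic(d);

    /* Only the fixed DomU layout fits the migration stream format. */
    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) )
        return -EOPNOTSUPP;

    return hvm_save_entry(IOAPIC, 0, h, &s->domU);
}
```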
On Tue, Mar 28, 2017 at 03:10:27AM -0600, Jan Beulich wrote: > >>> On 28.03.17 at 10:40, <roger.pau@citrix.com> wrote: > > On Tue, Mar 28, 2017 at 01:25:44AM -0600, Jan Beulich wrote: > >> >>> On 27.03.17 at 19:28, <roger.pau@citrix.com> wrote: > > ARRAY_SIZE(((struct hvm_hw_vioapic *)0)->redirtbl) > > > > Which is kind of cumbersome? I can define that into a macro I guess. > > Indeed, ugly. How about > > struct hvm_vioapic { > struct domain *domain; > union { > DECLARE_VIOAPIC(, VIOAPIC_NUM_PINS); > struct hvm_hw_vioapic domU; > }; > }; > > (with "domU" subject to improvement)? This might at once allow > making the save/restore code look better. Right, this indeed looks better, and will make the save/restore code neater. I'm not sure I can come up with a better name, "guest", "static"? > >> >> > @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) > >> >> > (uint32_t) hvm_irq->isa_irq.pad[0], > >> >> > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], > >> >> > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); > >> >> > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) > >> >> > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) > >> >> > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 > >> >> > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", > >> >> > i, i+7, > >> >> > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) > >> >> > hvm_irq->gsi_assert_count[i+5], > >> >> > hvm_irq->gsi_assert_count[i+6], > >> >> > hvm_irq->gsi_assert_count[i+7]); > >> >> > + if ( i != hvm_irq->nr_gsis ) > >> >> > + { > >> >> > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); > >> >> > + for ( ; i < hvm_irq->nr_gsis; i++) > >> >> > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); > >> >> > + printk("\n"); > >> >> > + } > >> >> > >> >> I realize you've just copied this from existing code, but what > >> >> advantage does %2.2 have over just %2 here? > >> > > >> > Shouldn't it be just %3 anyway? Would you be fine with me fixing this here, or > >> > would you rather prefer a separate patch? > >> > >> Well, if you also want to change the existing ones, then a separate > >> patch would be preferred. As to %2 vs %3 - how would the latter > >> be any better if we want/need to change the type from u8 anyway? > > > > ATM uint8_t can be 255, so %3 seems better (although AFAICT this is limited to > > 4, because there are 4 INT# lines to each GSI, and pending GSIs are not > > accumulated). I'm not sure I follow why do we want/need to change the type. > > For our DomU model there are four lines. Physical machines often > declare more in ACPI, and I'm not sure there's a theoretical upper > limit. In any case, I'm not sure this is relevant to PVH Dom0, where Xen should simply bind GSIs, but has no idea of all the interrupt routing behind them. This is relevant to DomUs because in that case Xen acts as a PCI interrupt router AFAICT. > >> >> > @@ -89,13 +77,27 @@ struct hvm_irq { > >> >> > u8 round_robin_prev_vcpu; > >> >> > > >> >> > struct hvm_irq_dpci *dpci; > >> >> > + > >> >> > + /* > >> >> > + * Number of wires asserting each GSI. > >> >> > + * > >> >> > + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space > >> >> > + * except ISA IRQ 0, which is connected to GSI 2. > >> >> > + * PCI links map into this space via the PCI-ISA bridge. > >> >> > + * > >> >> > + * GSIs 16+ are used only be PCI devices. 
The mapping from PCI device to > >> >> > + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 > >> >> > + */ > >> >> > + unsigned int nr_gsis; > >> >> > + u8 gsi_assert_count[]; > >> >> > }; > >> >> > >> >> I'm afraid at the same time (or in a later patch) the type of this array > >> >> (and maybe also the type of pci_link_assert_count[]) will need to be > >> >> widened. > >> > > >> > This one seems to be fine for PVH Dom0 (you will have to look at the next > >> > series), but I specifically avoid touching pci_link_assert_count. Again, better > >> > to comment this on the next patch series that actually implements the interrupt > >> > binding. This is needed because code in vioapic makes use of > >> > gsi_assert_count. > >> > >> Well, we can certainly discuss this there (whenever I get to look at > >> it, as that's not 4.9 stuff now anymore), but for now I don't see the > >> connection between my comment and your reply. > > > > Hm, I'm not sure I understand why would you like to change the type of this > > array. AFAICT the type of the array is not related to the number of vIO APIC > > pins, or the number of vIO APICs. > > See above - I think it is related to the number of declared INTx > lines (of which a DomU at present would only have 4). Or maybe > I'm mistaken where the INTx value comes from here. AFAICT in any case the INTx is always limited to INTA-INTD (4), but I don't think this is important for Dom0, because there Xen doesn't act as a PCI interrupt router, and thus doesn't need to know the INTx. In fact gsi_assert_count used with a PVH Dom0 should either be 0 or 1, because Xen is identity mapping machine GSIs into guest GSIs, without any kind of routing. Roger.
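The device-to-GSI formula quoted in the comment is exactly what the patch's `hvm_pci_intx_gsi()` macro computes; a few worked values show how everything folds into the GSI 16-47 range:

```c
/* From the patch (xen/include/asm-x86/hvm/irq.h): */
#define hvm_pci_intx_gsi(dev, intx) \
    (((((dev) << 2) + ((dev) >> 3) + (intx)) & 31) + 16)

/* hvm_pci_intx_gsi(0, 0) == 16   (device 0, INTA)
 * hvm_pci_intx_gsi(3, 0) == 28   (device 3, INTA)
 * hvm_pci_intx_gsi(3, 1) == 29   (device 3, INTB)
 * hvm_pci_intx_gsi(8, 0) == 17   (device 8, INTA -- folded by the & 31)
 */
```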
>>> On 28.03.17 at 11:34, <roger.pau@citrix.com> wrote: > On Tue, Mar 28, 2017 at 03:10:27AM -0600, Jan Beulich wrote: >> >>> On 28.03.17 at 10:40, <roger.pau@citrix.com> wrote: >> > On Tue, Mar 28, 2017 at 01:25:44AM -0600, Jan Beulich wrote: >> >> >>> On 27.03.17 at 19:28, <roger.pau@citrix.com> wrote: >> > ARRAY_SIZE(((struct hvm_hw_vioapic *)0)->redirtbl) >> > >> > Which is kind of cumbersome? I can define that into a macro I guess. >> >> Indeed, ugly. How about >> >> struct hvm_vioapic { >> struct domain *domain; >> union { >> DECLARE_VIOAPIC(, VIOAPIC_NUM_PINS); >> struct hvm_hw_vioapic domU; >> }; >> }; >> >> (with "domU" subject to improvement)? This might at once allow >> making the save/restore code look better. > > Right, this indeed looks better, and will make the save/restore code neater. > I'm not sure I can come up with a better name, "guest", "static"? Well, "static" is a keyword, so can't be used, and Dom0 is a "guest" too. >> >> >> > @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) >> >> >> > (uint32_t) hvm_irq->isa_irq.pad[0], >> >> >> > hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], >> >> >> > hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); >> >> >> > - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) >> >> >> > + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) >> >> >> > printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 >> >> >> > " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", >> >> >> > i, i+7, >> >> >> > @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) >> >> >> > hvm_irq->gsi_assert_count[i+5], >> >> >> > hvm_irq->gsi_assert_count[i+6], >> >> >> > hvm_irq->gsi_assert_count[i+7]); >> >> >> > + if ( i != hvm_irq->nr_gsis ) >> >> >> > + { >> >> >> > + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); >> >> >> > + for ( ; i < hvm_irq->nr_gsis; i++) >> >> >> > + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); >> >> >> > + printk("\n"); >> >> >> > + } >> >> >> >> >> >> I realize you've just copied this from existing code, but what >> >> >> advantage does %2.2 have over just %2 here? >> >> > >> >> > Shouldn't it be just %3 anyway? Would you be fine with me fixing this here, or >> >> > would you rather prefer a separate patch? >> >> >> >> Well, if you also want to change the existing ones, then a separate >> >> patch would be preferred. As to %2 vs %3 - how would the latter >> >> be any better if we want/need to change the type from u8 anyway? >> > >> > ATM uint8_t can be 255, so %3 seems better (although AFAICT this is limited to >> > 4, because there are 4 INT# lines to each GSI, and pending GSIs are not >> > accumulated). I'm not sure I follow why do we want/need to change the type. >> >> For our DomU model there are four lines. Physical machines often >> declare more in ACPI, and I'm not sure there's a theoretical upper >> limit. > > In any case, I'm not sure this is relevant to PVH Dom0, where Xen should simply > bind GSIs, but has no idea of all the interrupt routing behind them. This is > relevant to DomUs because in that case Xen acts as a PCI interrupt router > AFAICT. Good point. But is all this refcounting code being bypassed for Dom0? If so, you wouldn't need to extend the array here. If not, overflows may still matter. Jan
On Tue, Mar 28, 2017 at 03:49:23AM -0600, Jan Beulich wrote: > >>> On 28.03.17 at 11:34, <roger.pau@citrix.com> wrote: > > On Tue, Mar 28, 2017 at 03:10:27AM -0600, Jan Beulich wrote: > >> >>> On 28.03.17 at 10:40, <roger.pau@citrix.com> wrote: > >> > On Tue, Mar 28, 2017 at 01:25:44AM -0600, Jan Beulich wrote: > >> For our DomU model there are four lines. Physical machines often > >> declare more in ACPI, and I'm not sure there's a theoretical upper > >> limit. > > > > In any case, I'm not sure this is relevant to PVH Dom0, where Xen should simply > > bind GSIs, but has no idea of all the interrupt routing behind them. This is > > relevant to DomUs because in that case Xen acts as a PCI interrupt router > > AFAICT. > > Good point. But is all this refcounting code being bypassed for Dom0? > If so, you wouldn't need to extend the array here. If not, overflows > may still matter. It's not really bypassed for Dom0 (Xen still needs to keep track of pending interrupts), but it's more simple. As said above a GSI is either pending (gsi_assert_count[gsi] == 1) or not (gsi_assert_count[gsi] == 0). I could maybe use a bitmap for that and avoid touching gsi_assert_count at all, but then I will have to introduce more changes. Roger.
>>> On 28.03.17 at 11:59, <roger.pau@citrix.com> wrote: > On Tue, Mar 28, 2017 at 03:49:23AM -0600, Jan Beulich wrote: >> >>> On 28.03.17 at 11:34, <roger.pau@citrix.com> wrote: >> > On Tue, Mar 28, 2017 at 03:10:27AM -0600, Jan Beulich wrote: >> >> >>> On 28.03.17 at 10:40, <roger.pau@citrix.com> wrote: >> >> > On Tue, Mar 28, 2017 at 01:25:44AM -0600, Jan Beulich wrote: >> >> For our DomU model there are four lines. Physical machines often >> >> declare more in ACPI, and I'm not sure there's a theoretical upper >> >> limit. >> > >> > In any case, I'm not sure this is relevant to PVH Dom0, where Xen should simply >> > bind GSIs, but has no idea of all the interrupt routing behind them. This is >> > relevant to DomUs because in that case Xen acts as a PCI interrupt router >> > AFAICT. >> >> Good point. But is all this refcounting code being bypassed for Dom0? >> If so, you wouldn't need to extend the array here. If not, overflows >> may still matter. > > It's not really bypassed for Dom0 (Xen still needs to keep track of pending > interrupts), but it's more simple. > > As said above a GSI is either pending (gsi_assert_count[gsi] == 1) or not > (gsi_assert_count[gsi] == 0). I could maybe use a bitmap for that and avoid > touching gsi_assert_count at all, but then I will have to introduce more > changes. I don't think there's a need, indeed. But I'd appreciate if you would add ASSERT()s to that effect. Jan
On Tue, Mar 28, 2017 at 04:33:49AM -0600, Jan Beulich wrote: > >>> On 28.03.17 at 11:59, <roger.pau@citrix.com> wrote: > > On Tue, Mar 28, 2017 at 03:49:23AM -0600, Jan Beulich wrote: > >> >>> On 28.03.17 at 11:34, <roger.pau@citrix.com> wrote: > >> > On Tue, Mar 28, 2017 at 03:10:27AM -0600, Jan Beulich wrote: > >> >> >>> On 28.03.17 at 10:40, <roger.pau@citrix.com> wrote: > >> >> > On Tue, Mar 28, 2017 at 01:25:44AM -0600, Jan Beulich wrote: > >> >> For our DomU model there are four lines. Physical machines often > >> >> declare more in ACPI, and I'm not sure there's a theoretical upper > >> >> limit. > >> > > >> > In any case, I'm not sure this is relevant to PVH Dom0, where Xen should simply > >> > bind GSIs, but has no idea of all the interrupt routing behind them. This is > >> > relevant to DomUs because in that case Xen acts as a PCI interrupt router > >> > AFAICT. > >> > >> Good point. But is all this refcounting code being bypassed for Dom0? > >> If so, you wouldn't need to extend the array here. If not, overflows > >> may still matter. > > > > It's not really bypassed for Dom0 (Xen still needs to keep track of pending > > interrupts), but it's more simple. > > > > As said above a GSI is either pending (gsi_assert_count[gsi] == 1) or not > > (gsi_assert_count[gsi] == 0). I could maybe use a bitmap for that and avoid > > touching gsi_assert_count at all, but then I will have to introduce more > > changes. > > I don't think there's a need, indeed. But I'd appreciate if you > would add ASSERT()s to that effect. OK, such ASSERTs would be added to hvm_gsi_{de}assert in patch #3 of the next series ("x86/dpci: bind legacy PCI interrupts to PVHv2 Dom0"). Roger.
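For completeness, the ASSERT being agreed on here would sit in the `hvm_gsi_assert()`/`hvm_gsi_deassert()` helpers Roger mentions, which belong to the follow-up series rather than this patch; the sketch below is therefore an approximation of the idea, not the committed code:

```c
void hvm_gsi_assert(struct domain *d, unsigned int gsi)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    ASSERT(gsi < hvm_irq->nr_gsis);

    /*
     * GSIs are identity-mapped for the PVH hardware domain, so each entry
     * of gsi_assert_count is only ever 0 (idle) or 1 (asserted).
     */
    spin_lock(&d->arch.hvm_domain.irq_lock);
    if ( !hvm_irq->gsi_assert_count[gsi] )
    {
        hvm_irq->gsi_assert_count[gsi] = 1;
        assert_gsi(d, gsi);
    }
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
```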
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 0282986738..9a6cd9c9bf 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -457,7 +457,7 @@ void hvm_migrate_pirqs(struct vcpu *v) { struct domain *d = v->domain; - if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci ) + if ( !iommu_enabled || !d->arch.hvm_domain.irq->dpci ) return; spin_lock(&d->event_lock); @@ -619,11 +619,16 @@ int hvm_domain_initialise(struct domain *d) d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS); d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler, NR_IO_HANDLERS); + d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(VIOAPIC_NUM_PINS)); + rc = -ENOMEM; - if ( !d->arch.hvm_domain.pl_time || + if ( !d->arch.hvm_domain.pl_time || !d->arch.hvm_domain.irq || !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler ) goto fail1; + /* Set the default number of GSIs */ + hvm_domain_irq(d)->nr_gsis = VIOAPIC_NUM_PINS; + /* need link to containing domain */ d->arch.hvm_domain.pl_time->domain = d; @@ -680,6 +685,7 @@ int hvm_domain_initialise(struct domain *d) xfree(d->arch.hvm_domain.io_handler); xfree(d->arch.hvm_domain.params); xfree(d->arch.hvm_domain.pl_time); + xfree(d->arch.hvm_domain.irq); fail0: hvm_destroy_cacheattr_region_list(d); return rc; @@ -722,6 +728,9 @@ void hvm_domain_destroy(struct domain *d) xfree(d->arch.hvm_domain.pl_time); d->arch.hvm_domain.pl_time = NULL; + + xfree(d->arch.hvm_domain.irq); + d->arch.hvm_domain.irq = NULL; } static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h) diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c index c2951ccf8a..6e67cae9bd 100644 --- a/xen/arch/x86/hvm/irq.c +++ b/xen/arch/x86/hvm/irq.c @@ -69,6 +69,7 @@ static void __hvm_pci_intx_assert( return; gsi = hvm_pci_intx_gsi(device, intx); + ASSERT(gsi < hvm_irq->nr_gsis); if ( hvm_irq->gsi_assert_count[gsi]++ == 0 ) assert_gsi(d, gsi); @@ -99,6 +100,7 @@ static void __hvm_pci_intx_deassert( return; gsi = hvm_pci_intx_gsi(device, intx); + ASSERT(gsi < hvm_irq->nr_gsis); --hvm_irq->gsi_assert_count[gsi]; link = hvm_pci_intx_link(device, intx); @@ -122,7 +124,7 @@ void hvm_isa_irq_assert( struct hvm_irq *hvm_irq = hvm_domain_irq(d); unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); - ASSERT(isa_irq <= 15); + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); spin_lock(&d->arch.hvm_domain.irq_lock); @@ -139,7 +141,7 @@ void hvm_isa_irq_deassert( struct hvm_irq *hvm_irq = hvm_domain_irq(d); unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); - ASSERT(isa_irq <= 15); + ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis); spin_lock(&d->arch.hvm_domain.irq_lock); @@ -363,7 +365,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via) { case HVMIRQ_callback_gsi: gsi = hvm_irq->callback_via.gsi = (uint8_t)via; - if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) ) + if ( (gsi == 0) || (gsi >= hvm_irq->nr_gsis) ) hvm_irq->callback_via_type = HVMIRQ_callback_none; else if ( hvm_irq->callback_via_asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) ) @@ -419,9 +421,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v) if ( unlikely(v->mce_pending) ) return hvm_intack_mce; - if ( (plat->irq.callback_via_type == HVMIRQ_callback_vector) + if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector) && vcpu_info(v, evtchn_upcall_pending) ) - return hvm_intack_vector(plat->irq.callback_via.vector); + return hvm_intack_vector(plat->irq->callback_via.vector); if ( vlapic_accept_pic_intr(v) && 
plat->vpic[0].int_output ) return hvm_intack_pic(0); @@ -495,7 +497,7 @@ static void irq_dump(struct domain *d) (uint32_t) hvm_irq->isa_irq.pad[0], hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1], hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]); - for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 ) + for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 ) printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8 " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", i, i+7, @@ -507,6 +509,13 @@ static void irq_dump(struct domain *d) hvm_irq->gsi_assert_count[i+5], hvm_irq->gsi_assert_count[i+6], hvm_irq->gsi_assert_count[i+7]); + if ( i != hvm_irq->nr_gsis ) + { + printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1); + for ( ; i < hvm_irq->nr_gsis; i++) + printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]); + printk("\n"); + } printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n", hvm_irq->pci_link_assert_count[0], hvm_irq->pci_link_assert_count[1], @@ -545,6 +554,9 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h) unsigned int asserted, pdev, pintx; int rc; + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) + return -EOPNOTSUPP; + spin_lock(&d->arch.hvm_domain.irq_lock); pdev = hvm_irq->callback_via.pci.dev; @@ -592,6 +604,9 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) struct hvm_irq *hvm_irq = hvm_domain_irq(d); int link, dev, intx, gsi; + if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS ) + return -EOPNOTSUPP; + /* Load the PCI IRQ lines */ if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 ) return -EINVAL; diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c index 080183ea31..50e2f00214 100644 --- a/xen/drivers/passthrough/io.c +++ b/xen/drivers/passthrough/io.c @@ -195,7 +195,7 @@ struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *d) if ( !d || !is_hvm_domain(d) ) return NULL; - return d->arch.hvm_domain.irq.dpci; + return d->arch.hvm_domain.irq->dpci; } void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci) @@ -333,7 +333,7 @@ int pt_irq_create_bind( for ( i = 0; i < NR_HVM_IRQS; i++ ) INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]); - d->arch.hvm_domain.irq.dpci = hvm_irq_dpci; + d->arch.hvm_domain.irq->dpci = hvm_irq_dpci; } info = pirq_get_info(d, pirq); @@ -788,7 +788,7 @@ static int _hvm_dpci_msi_eoi(struct domain *d, void hvm_dpci_msi_eoi(struct domain *d, int vector) { - if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci ) + if ( !iommu_enabled || !d->arch.hvm_domain.irq->dpci ) return; spin_lock(&d->event_lock); @@ -798,7 +798,7 @@ void hvm_dpci_msi_eoi(struct domain *d, int vector) static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci) { - if ( unlikely(!d->arch.hvm_domain.irq.dpci) ) + if ( unlikely(!d->arch.hvm_domain.irq->dpci) ) { ASSERT_UNREACHABLE(); return; diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c index beddd42701..b5b865a2d4 100644 --- a/xen/drivers/passthrough/pci.c +++ b/xen/drivers/passthrough/pci.c @@ -815,7 +815,7 @@ static int pci_clean_dpci_irqs(struct domain *d) return ret; } - d->arch.hvm_domain.irq.dpci = NULL; + d->arch.hvm_domain.irq->dpci = NULL; free_hvm_irq_dpci(hvm_irq_dpci); } spin_unlock(&d->event_lock); diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index ec14cce81f..0b7e43fa16 100644 --- a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -17,7 +17,7 @@ #define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain)) #define is_hvm_pv_evtchn_domain(d) 
(is_hvm_domain(d) && \ - d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector) + d->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector) #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain)) #define is_domain_direct_mapped(d) ((void)(d), 0) diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h index 420cbdc609..c3cca94a97 100644 --- a/xen/include/asm-x86/hvm/domain.h +++ b/xen/include/asm-x86/hvm/domain.h @@ -125,7 +125,7 @@ struct hvm_domain { /* Lock protects access to irq, vpic and vioapic. */ spinlock_t irq_lock; - struct hvm_irq irq; + struct hvm_irq *irq; struct hvm_hw_vpic vpic[2]; /* 0=master; 1=slave */ struct hvm_vioapic *vioapic; struct hvm_hw_stdvga stdvga; diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h index 17a957d4b5..7d45293aed 100644 --- a/xen/include/asm-x86/hvm/irq.h +++ b/xen/include/asm-x86/hvm/irq.h @@ -67,18 +67,6 @@ struct hvm_irq { u8 pci_link_assert_count[4]; /* - * Number of wires asserting each GSI. - * - * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space - * except ISA IRQ 0, which is connected to GSI 2. - * PCI links map into this space via the PCI-ISA bridge. - * - * GSIs 16+ are used only be PCI devices. The mapping from PCI device to - * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 - */ - u8 gsi_assert_count[VIOAPIC_NUM_PINS]; - - /* * GSIs map onto PIC/IO-APIC in the usual way: * 0-7: Master 8259 PIC, IO-APIC pins 0-7 * 8-15: Slave 8259 PIC, IO-APIC pins 8-15 @@ -89,13 +77,27 @@ struct hvm_irq { u8 round_robin_prev_vcpu; struct hvm_irq_dpci *dpci; + + /* + * Number of wires asserting each GSI. + * + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space + * except ISA IRQ 0, which is connected to GSI 2. + * PCI links map into this space via the PCI-ISA bridge. + * + * GSIs 16+ are used only be PCI devices. The mapping from PCI device to + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 + */ + unsigned int nr_gsis; + u8 gsi_assert_count[]; }; #define hvm_pci_intx_gsi(dev, intx) \ (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16) #define hvm_pci_intx_link(dev, intx) \ (((dev) + (intx)) & 3) -#define hvm_domain_irq(d) (&(d)->arch.hvm_domain.irq) +#define hvm_domain_irq(d) ((d)->arch.hvm_domain.irq) +#define hvm_irq_size(cnt) offsetof(struct hvm_irq, gsi_assert_count[cnt]) #define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
Rearrange the fields of hvm_irq so that gsi_assert_count can be converted into a variable-size array, and add a new field to account for the number of GSIs. Due to these changes, the irq member in the hvm_domain struct also needs to become a pointer set at runtime.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/hvm.c           | 13 +++++++++++--
 xen/arch/x86/hvm/irq.c           | 27 +++++++++++++++++++++------
 xen/drivers/passthrough/io.c     |  8 ++++----
 xen/drivers/passthrough/pci.c    |  2 +-
 xen/include/asm-x86/domain.h     |  2 +-
 xen/include/asm-x86/hvm/domain.h |  2 +-
 xen/include/asm-x86/hvm/irq.h    | 28 +++++++++++++++-------------
 7 files changed, 54 insertions(+), 28 deletions(-)
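One design point worth noting about the diff: because almost every reader already goes through the `hvm_domain_irq()` wrapper, converting the embedded structure to a runtime-allocated pointer only required redefining that macro plus the handful of direct `.irq.` accesses flagged at the top of the thread. A typical call site is unchanged, for example (illustrative fragment, with `d` and `gsi` supplied by the surrounding function):

```c
/* Callers keep using the accessor as before; the macro now simply
 * yields the dynamically allocated pointer. */
struct hvm_irq *hvm_irq = hvm_domain_irq(d);

if ( gsi < hvm_irq->nr_gsis )
    hvm_irq->gsi_assert_count[gsi]++;
```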