Message ID | 1458871508-17279-17-git-send-email-andre.przywara@arm.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Fri, Mar 25, 2016 at 02:04:39AM +0000, Andre Przywara wrote: > Signed-off-by: Andre Przywara <andre.przywara@arm.com> > --- > virt/kvm/arm/vgic/vgic_mmio.c | 87 ++++++++++++++++++++++++++++++++++++++++++- > 1 file changed, 85 insertions(+), 2 deletions(-) > > diff --git a/virt/kvm/arm/vgic/vgic_mmio.c b/virt/kvm/arm/vgic/vgic_mmio.c > index 0688a69..8514f92 100644 > --- a/virt/kvm/arm/vgic/vgic_mmio.c > +++ b/virt/kvm/arm/vgic/vgic_mmio.c > @@ -206,6 +206,89 @@ static int vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, > return 0; > } > > +static int vgic_mmio_read_pending(struct kvm_vcpu *vcpu, > + struct kvm_io_device *this, > + gpa_t addr, int len, void *val) > +{ > + struct vgic_io_device *iodev = container_of(this, > + struct vgic_io_device, dev); > + u32 intid = (addr - iodev->base_addr) * 8; > + u32 value = 0; > + int i; > + > + if (iodev->redist_vcpu) > + vcpu = iodev->redist_vcpu; > + > + /* Loop over all IRQs affected by this read */ > + for (i = 0; i < len * 8; i++) { > + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); > + > + spin_lock(&irq->irq_lock); > + if (irq->pending) > + value |= (1U << i); > + spin_unlock(&irq->irq_lock); here there clearly is no need to take the lock (because a bool read is atomic), but that should be explained in a one-line comment. 
> + } > + > + write_mask32(value, addr & 3, len, val); > + return 0; > +} > + > +static int vgic_mmio_write_spending(struct kvm_vcpu *vcpu, > + struct kvm_io_device *this, > + gpa_t addr, int len, const void *val) > +{ > + struct vgic_io_device *iodev = container_of(this, > + struct vgic_io_device, dev); > + u32 intid = (addr - iodev->base_addr) * 8; > + int i; > + > + if (iodev->redist_vcpu) > + vcpu = iodev->redist_vcpu; > + > + for_each_set_bit(i, val, len * 8) { > + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); > + > + spin_lock(&irq->irq_lock); > + irq->pending = true; > + if (irq->config == VGIC_CONFIG_LEVEL) > + irq->soft_pending = true; > + > + vgic_queue_irq(vcpu->kvm, irq); > + } > + > + return 0; > +} > + > +static int vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, > + struct kvm_io_device *this, > + gpa_t addr, int len, const void *val) > +{ > + struct vgic_io_device *iodev = container_of(this, > + struct vgic_io_device, dev); > + u32 intid = (addr - iodev->base_addr) * 8; > + int i; > + > + if (iodev->redist_vcpu) > + vcpu = iodev->redist_vcpu; > + > + for_each_set_bit(i, val, len * 8) { > + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); > + > + spin_lock(&irq->irq_lock); > + > + if (irq->config == VGIC_CONFIG_LEVEL) { > + irq->soft_pending = false; > + irq->pending = irq->line_level; > + } else { > + irq->pending = false; > + } > + /* TODO: Does the exit/entry code take care of "unqueuing"? 
*/ see previous patch comment > + > + spin_unlock(&irq->irq_lock); > + } > + return 0; > +} > + > struct vgic_register_region vgic_v2_dist_registers[] = { > REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, > vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12), > @@ -216,9 +299,9 @@ struct vgic_register_region vgic_v2_dist_registers[] = { > REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR, > vgic_mmio_read_enable, vgic_mmio_write_cenable, 1), > REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, > - vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), > + vgic_mmio_read_pending, vgic_mmio_write_spending, 1), > REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, > - vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), > + vgic_mmio_read_pending, vgic_mmio_write_cpending, 1), > REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, > vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), > REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR, > -- > 2.7.3 >
On 31/03/16 10:35, Christoffer Dall wrote: > On Fri, Mar 25, 2016 at 02:04:39AM +0000, Andre Przywara wrote: >> Signed-off-by: Andre Przywara <andre.przywara@arm.com> >> --- >> virt/kvm/arm/vgic/vgic_mmio.c | 87 ++++++++++++++++++++++++++++++++++++++++++- >> 1 file changed, 85 insertions(+), 2 deletions(-) >> >> diff --git a/virt/kvm/arm/vgic/vgic_mmio.c b/virt/kvm/arm/vgic/vgic_mmio.c >> index 0688a69..8514f92 100644 >> --- a/virt/kvm/arm/vgic/vgic_mmio.c >> +++ b/virt/kvm/arm/vgic/vgic_mmio.c >> @@ -206,6 +206,89 @@ static int vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, >> return 0; >> } >> >> +static int vgic_mmio_read_pending(struct kvm_vcpu *vcpu, >> + struct kvm_io_device *this, >> + gpa_t addr, int len, void *val) >> +{ >> + struct vgic_io_device *iodev = container_of(this, >> + struct vgic_io_device, dev); >> + u32 intid = (addr - iodev->base_addr) * 8; >> + u32 value = 0; >> + int i; >> + >> + if (iodev->redist_vcpu) >> + vcpu = iodev->redist_vcpu; >> + >> + /* Loop over all IRQs affected by this read */ >> + for (i = 0; i < len * 8; i++) { >> + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); >> + >> + spin_lock(&irq->irq_lock); >> + if (irq->pending) >> + value |= (1U << i); >> + spin_unlock(&irq->irq_lock); > > here there clearly is no need to take the lock (because a bool read is > atomic), but that should be explained in a one-line comment. Is that really true? Isn't it that another lock holder expects full control over the IRQ struct, including the freedom to change values at will without caring about other observers? I might be too paranoid here, but I think I explicitly added the lock here for a reason (which I don't remember anymore, sadly). Cheers, Andre. 
>> + } >> + >> + write_mask32(value, addr & 3, len, val); >> + return 0; >> +} >> + >> +static int vgic_mmio_write_spending(struct kvm_vcpu *vcpu, >> + struct kvm_io_device *this, >> + gpa_t addr, int len, const void *val) >> +{ >> + struct vgic_io_device *iodev = container_of(this, >> + struct vgic_io_device, dev); >> + u32 intid = (addr - iodev->base_addr) * 8; >> + int i; >> + >> + if (iodev->redist_vcpu) >> + vcpu = iodev->redist_vcpu; >> + >> + for_each_set_bit(i, val, len * 8) { >> + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); >> + >> + spin_lock(&irq->irq_lock); >> + irq->pending = true; >> + if (irq->config == VGIC_CONFIG_LEVEL) >> + irq->soft_pending = true; >> + >> + vgic_queue_irq(vcpu->kvm, irq); >> + } >> + >> + return 0; >> +} >> + >> +static int vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, >> + struct kvm_io_device *this, >> + gpa_t addr, int len, const void *val) >> +{ >> + struct vgic_io_device *iodev = container_of(this, >> + struct vgic_io_device, dev); >> + u32 intid = (addr - iodev->base_addr) * 8; >> + int i; >> + >> + if (iodev->redist_vcpu) >> + vcpu = iodev->redist_vcpu; >> + >> + for_each_set_bit(i, val, len * 8) { >> + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); >> + >> + spin_lock(&irq->irq_lock); >> + >> + if (irq->config == VGIC_CONFIG_LEVEL) { >> + irq->soft_pending = false; >> + irq->pending = irq->line_level; >> + } else { >> + irq->pending = false; >> + } >> + /* TODO: Does the exit/entry code take care of "unqueuing"? 
*/ > > see previous patch comment > >> + >> + spin_unlock(&irq->irq_lock); >> + } >> + return 0; >> +} >> + >> struct vgic_register_region vgic_v2_dist_registers[] = { >> REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, >> vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12), >> @@ -216,9 +299,9 @@ struct vgic_register_region vgic_v2_dist_registers[] = { >> REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR, >> vgic_mmio_read_enable, vgic_mmio_write_cenable, 1), >> REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, >> - vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), >> + vgic_mmio_read_pending, vgic_mmio_write_spending, 1), >> REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, >> - vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), >> + vgic_mmio_read_pending, vgic_mmio_write_cpending, 1), >> REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, >> vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), >> REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR, >> -- >> 2.7.3 >> >
On Mon, Apr 11, 2016 at 12:31:32PM +0100, Andre Przywara wrote: > On 31/03/16 10:35, Christoffer Dall wrote: > > On Fri, Mar 25, 2016 at 02:04:39AM +0000, Andre Przywara wrote: > >> Signed-off-by: Andre Przywara <andre.przywara@arm.com> > >> --- > >> virt/kvm/arm/vgic/vgic_mmio.c | 87 ++++++++++++++++++++++++++++++++++++++++++- > >> 1 file changed, 85 insertions(+), 2 deletions(-) > >> > >> diff --git a/virt/kvm/arm/vgic/vgic_mmio.c b/virt/kvm/arm/vgic/vgic_mmio.c > >> index 0688a69..8514f92 100644 > >> --- a/virt/kvm/arm/vgic/vgic_mmio.c > >> +++ b/virt/kvm/arm/vgic/vgic_mmio.c > >> @@ -206,6 +206,89 @@ static int vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, > >> return 0; > >> } > >> > >> +static int vgic_mmio_read_pending(struct kvm_vcpu *vcpu, > >> + struct kvm_io_device *this, > >> + gpa_t addr, int len, void *val) > >> +{ > >> + struct vgic_io_device *iodev = container_of(this, > >> + struct vgic_io_device, dev); > >> + u32 intid = (addr - iodev->base_addr) * 8; > >> + u32 value = 0; > >> + int i; > >> + > >> + if (iodev->redist_vcpu) > >> + vcpu = iodev->redist_vcpu; > >> + > >> + /* Loop over all IRQs affected by this read */ > >> + for (i = 0; i < len * 8; i++) { > >> + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); > >> + > >> + spin_lock(&irq->irq_lock); > >> + if (irq->pending) > >> + value |= (1U << i); > >> + spin_unlock(&irq->irq_lock); > > > > here there clearly is no need to take the lock (because a bool read is > > atomic), but that should be explained in a one-line comment. > > Is that really true? Isn't it that another lock holder expects full > control over the IRQ struct, including the freedom to change values at > will without caring about other observers? 
Consider the following three cases, assuming pending is clear in the initial state: Case 1: CPU A CPU B ----- ----- read pending lock irq set pending unlock irq Case 2: CPU A CPU B ----- ----- lock irq read pending set pending unlock irq Case 3: CPU A CPU B ----- ----- lock irq set pending unlock irq read pending The only effect of adding a lock/unlock around the read_pending() operation is to force case 2 to be equivalent to case 3, but case 1 could still happen, so this just boils down to observing the value before or after the write, both of which are fine. If there were weird side effects from reading this value, or even getting to the struct would depend on other things CPU B could do while holding the lock, it would be a different story, but here I'm fairly certain you don't need the lock. > I might be too paranoid here, but I think I explicitly added the lock > here for a reason (which I don't remember anymore, sadly). > Can you find an example where something breaks without holding the lock here? Thanks, -Christoffer
diff --git a/virt/kvm/arm/vgic/vgic_mmio.c b/virt/kvm/arm/vgic/vgic_mmio.c index 0688a69..8514f92 100644 --- a/virt/kvm/arm/vgic/vgic_mmio.c +++ b/virt/kvm/arm/vgic/vgic_mmio.c @@ -206,6 +206,89 @@ static int vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, return 0; } +static int vgic_mmio_read_pending(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, + gpa_t addr, int len, void *val) +{ + struct vgic_io_device *iodev = container_of(this, + struct vgic_io_device, dev); + u32 intid = (addr - iodev->base_addr) * 8; + u32 value = 0; + int i; + + if (iodev->redist_vcpu) + vcpu = iodev->redist_vcpu; + + /* Loop over all IRQs affected by this read */ + for (i = 0; i < len * 8; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + if (irq->pending) + value |= (1U << i); + spin_unlock(&irq->irq_lock); + } + + write_mask32(value, addr & 3, len, val); + return 0; +} + +static int vgic_mmio_write_spending(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, + gpa_t addr, int len, const void *val) +{ + struct vgic_io_device *iodev = container_of(this, + struct vgic_io_device, dev); + u32 intid = (addr - iodev->base_addr) * 8; + int i; + + if (iodev->redist_vcpu) + vcpu = iodev->redist_vcpu; + + for_each_set_bit(i, val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + irq->pending = true; + if (irq->config == VGIC_CONFIG_LEVEL) + irq->soft_pending = true; + + vgic_queue_irq(vcpu->kvm, irq); + } + + return 0; +} + +static int vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, + gpa_t addr, int len, const void *val) +{ + struct vgic_io_device *iodev = container_of(this, + struct vgic_io_device, dev); + u32 intid = (addr - iodev->base_addr) * 8; + int i; + + if (iodev->redist_vcpu) + vcpu = iodev->redist_vcpu; + + for_each_set_bit(i, val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + 
spin_lock(&irq->irq_lock); + + if (irq->config == VGIC_CONFIG_LEVEL) { + irq->soft_pending = false; + irq->pending = irq->line_level; + } else { + irq->pending = false; + } + /* TODO: Does the exit/entry code take care of "unqueuing"? */ + + spin_unlock(&irq->irq_lock); + } + return 0; +} + struct vgic_register_region vgic_v2_dist_registers[] = { REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12), @@ -216,9 +299,9 @@ struct vgic_register_region vgic_v2_dist_registers[] = { REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR, vgic_mmio_read_enable, vgic_mmio_write_cenable, 1), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, - vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), + vgic_mmio_read_pending, vgic_mmio_write_spending, 1), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, - vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), + vgic_mmio_read_pending, vgic_mmio_write_cpending, 1), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, vgic_mmio_read_nyi, vgic_mmio_write_nyi, 1), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
Signed-off-by: Andre Przywara <andre.przywara@arm.com> --- virt/kvm/arm/vgic/vgic_mmio.c | 87 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 2 deletions(-)