
[for-4.2,v5,1/2] kvm: s390: split too big memory section on several memslots

Message ID 20190807153241.24050-1-imammedo@redhat.com (mailing list archive)
State New, archived
Series [for-4.2,v5,1/2] kvm: s390: split too big memory section on several memslots

Commit Message

Igor Mammedov Aug. 7, 2019, 3:32 p.m. UTC
The maximum memslot size supported by KVM on s390 is 8TB;
move the logic of splitting RAM into chunks of up to 8TB into the KVM code.

This hides the KVM-specific restriction inside the KVM code and keeps it
from affecting board-level design decisions, which in turn allows us to
avoid misusing the memory_region_allocate_system_memory() API and
eventually use a single hostmem backend for guest RAM.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
v5:
  * move computation 'size -= slot_size' inside of loop body
          (David Hildenbrand <david@redhat.com>)
v4:
  * fix compilation issue
          (Christian Borntraeger <borntraeger@de.ibm.com>)
  * advance HVA along with GPA in kvm_set_phys_mem()
          (Christian Borntraeger <borntraeger@de.ibm.com>)

This patch prepares only the KVM side for switching to a single RAM memory
region; another patch will take care of dropping the manual RAM partitioning
in the s390 code.
---
 include/sysemu/kvm_int.h   |  1 +
 accel/kvm/kvm-all.c        | 82 ++++++++++++++++++++++++--------------
 hw/s390x/s390-virtio-ccw.c |  9 -----
 target/s390x/kvm.c         | 12 ++++++
 4 files changed, 64 insertions(+), 40 deletions(-)
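
As an illustration only (distilled from the diff below, with a hypothetical
helper name, not a drop-in function), the registration side boils down to
carving a RAM section into kvm_max_slot_size chunks while advancing the guest
physical address and the host virtual address together:

static void register_ram_in_chunks(KVMMemoryListener *kml, MemoryRegion *mr,
                                   hwaddr start_addr, hwaddr size, void *ram)
{
    do {
        hwaddr slot_size = MIN(kvm_max_slot_size, size);
        KVMSlot *mem = kvm_alloc_slot(kml);

        mem->memory_size = slot_size;
        mem->start_addr = start_addr;           /* GPA of this chunk */
        mem->ram = ram;                         /* HVA of this chunk */
        mem->flags = kvm_mem_flags(mr);
        if (kvm_set_user_memory_region(kml, mem, true)) {
            abort();
        }
        start_addr += slot_size;                /* advance GPA ...   */
        ram += slot_size;                       /* ... and HVA       */
        size -= slot_size;
    } while (size);
}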

Comments

Cornelia Huck Aug. 20, 2019, 4:07 p.m. UTC | #1
On Wed,  7 Aug 2019 11:32:41 -0400
Igor Mammedov <imammedo@redhat.com> wrote:

> Max memslot size supported by kvm on s390 is 8Tb,
> move logic of splitting RAM in chunks upto 8T to KVM code.
> 
> This way it will hide KVM specific restrictions in KVM code
> and won't affect baord level design decisions. Which would allow
> us to avoid misusing memory_region_allocate_system_memory() API
> and eventually use a single hostmem backend for guest RAM.
> 
> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> ---
> v5:
>   * move computation 'size -= slot_size' inside of loop body
>           (David Hildenbrand <david@redhat.com>)
> v4:
>   * fix compilation issue
>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>   * advance HVA along with GPA in kvm_set_phys_mem()
>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> 
> patch prepares only KVM side for switching to single RAM memory region
> another patch will take care of  dropping manual RAM partitioning in
> s390 code.

I may have lost track a bit -- what is the status of this patch (and
the series)?

> ---
>  include/sysemu/kvm_int.h   |  1 +
>  accel/kvm/kvm-all.c        | 82 ++++++++++++++++++++++++--------------
>  hw/s390x/s390-virtio-ccw.c |  9 -----
>  target/s390x/kvm.c         | 12 ++++++
>  4 files changed, 64 insertions(+), 40 deletions(-)
> 
> diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
> index 31df465fdc..7f7520bce2 100644
> --- a/include/sysemu/kvm_int.h
> +++ b/include/sysemu/kvm_int.h
> @@ -41,4 +41,5 @@ typedef struct KVMMemoryListener {
>  void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
>                                    AddressSpace *as, int as_id);
>  
> +void kvm_set_max_memslot_size(hwaddr max_slot_size);
>  #endif
> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
> index f450f25295..8153556335 100644
> --- a/accel/kvm/kvm-all.c
> +++ b/accel/kvm/kvm-all.c
> @@ -138,6 +138,7 @@ bool kvm_direct_msi_allowed;
>  bool kvm_ioeventfd_any_length_allowed;
>  bool kvm_msi_use_devid;
>  static bool kvm_immediate_exit;
> +static hwaddr kvm_max_slot_size = ~0;
>  
>  static const KVMCapabilityInfo kvm_required_capabilites[] = {
>      KVM_CAP_INFO(USER_MEMORY),
> @@ -951,6 +952,14 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
>      return NULL;
>  }
>  
> +void kvm_set_max_memslot_size(hwaddr max_slot_size)
> +{
> +    g_assert(
> +        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
> +    );
> +    kvm_max_slot_size = max_slot_size;
> +}
> +
>  static void kvm_set_phys_mem(KVMMemoryListener *kml,
>                               MemoryRegionSection *section, bool add)
>  {
> @@ -958,7 +967,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
>      int err;
>      MemoryRegion *mr = section->mr;
>      bool writeable = !mr->readonly && !mr->rom_device;
> -    hwaddr start_addr, size;
> +    hwaddr start_addr, size, slot_size;
>      void *ram;
>  
>      if (!memory_region_is_ram(mr)) {
> @@ -983,41 +992,52 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
>      kvm_slots_lock(kml);
>  
>      if (!add) {
> -        mem = kvm_lookup_matching_slot(kml, start_addr, size);
> -        if (!mem) {
> -            goto out;
> -        }
> -        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
> -            kvm_physical_sync_dirty_bitmap(kml, section);
> -        }
> +        do {
> +            slot_size = kvm_max_slot_size < size ? kvm_max_slot_size : size;
> +            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
> +            if (!mem) {
> +                goto out;
> +            }
> +            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
> +                kvm_physical_sync_dirty_bitmap(kml, section);
> +            }
>  
> -        /* unregister the slot */
> -        g_free(mem->dirty_bmap);
> -        mem->dirty_bmap = NULL;
> -        mem->memory_size = 0;
> -        mem->flags = 0;
> -        err = kvm_set_user_memory_region(kml, mem, false);
> -        if (err) {
> -            fprintf(stderr, "%s: error unregistering slot: %s\n",
> -                    __func__, strerror(-err));
> -            abort();
> -        }
> +            /* unregister the slot */
> +            g_free(mem->dirty_bmap);
> +            mem->dirty_bmap = NULL;
> +            mem->memory_size = 0;
> +            mem->flags = 0;
> +            err = kvm_set_user_memory_region(kml, mem, false);
> +            if (err) {
> +                fprintf(stderr, "%s: error unregistering slot: %s\n",
> +                        __func__, strerror(-err));
> +                abort();
> +            }
> +            start_addr += slot_size;
> +            size -= slot_size;
> +        } while (size);
>          goto out;
>      }
>  
>      /* register the new slot */
> -    mem = kvm_alloc_slot(kml);
> -    mem->memory_size = size;
> -    mem->start_addr = start_addr;
> -    mem->ram = ram;
> -    mem->flags = kvm_mem_flags(mr);
> -
> -    err = kvm_set_user_memory_region(kml, mem, true);
> -    if (err) {
> -        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
> -                strerror(-err));
> -        abort();
> -    }
> +    do {
> +        slot_size = kvm_max_slot_size < size ? kvm_max_slot_size : size;
> +        mem = kvm_alloc_slot(kml);
> +        mem->memory_size = slot_size;
> +        mem->start_addr = start_addr;
> +        mem->ram = ram;
> +        mem->flags = kvm_mem_flags(mr);
> +
> +        err = kvm_set_user_memory_region(kml, mem, true);
> +        if (err) {
> +            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
> +                    strerror(-err));
> +            abort();
> +        }
> +        start_addr += slot_size;
> +        ram += slot_size;
> +        size -= slot_size;
> +    } while (size);
>  
>  out:
>      kvm_slots_unlock(kml);
> diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
> index 5b6a9a4e55..0c03ffb7c7 100644
> --- a/hw/s390x/s390-virtio-ccw.c
> +++ b/hw/s390x/s390-virtio-ccw.c
> @@ -151,15 +151,6 @@ static void virtio_ccw_register_hcalls(void)
>                                     virtio_ccw_hcall_early_printk);
>  }
>  
> -/*
> - * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
> - * as the dirty bitmap must be managed by bitops that take an int as
> - * position indicator. If we have a guest beyond that we will split off
> - * new subregions. The split must happen on a segment boundary (1MB).
> - */
> -#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
> -#define SEG_MSK (~0xfffffULL)
> -#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
>  static void s390_memory_init(ram_addr_t mem_size)
>  {
>      MemoryRegion *sysmem = get_system_memory();
> diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
> index 6e814c230b..6b1428a760 100644
> --- a/target/s390x/kvm.c
> +++ b/target/s390x/kvm.c
> @@ -28,6 +28,7 @@
>  #include "cpu.h"
>  #include "internal.h"
>  #include "kvm_s390x.h"
> +#include "sysemu/kvm_int.h"
>  #include "qapi/error.h"
>  #include "qemu/error-report.h"
>  #include "qemu/timer.h"
> @@ -122,6 +123,16 @@
>  #define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
>                                       (max_cpus + NR_LOCAL_IRQS))
>  
> +/*
> + * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
> + * as the dirty bitmap must be managed by bitops that take an int as
> + * position indicator. If we have a guest beyond that we will split off
> + * new subregions. The split must happen on a segment boundary (1MB).
> + */
> +#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
> +#define SEG_MSK (~0xfffffULL)
> +#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
> +
>  static CPUWatchpoint hw_watchpoint;
>  /*
>   * We don't use a list because this structure is also used to transmit the
> @@ -347,6 +358,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
>       */
>      /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */
>  
> +    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
>      return 0;
>  }
>
Igor Mammedov Aug. 27, 2019, 12:56 p.m. UTC | #2
On Tue, 20 Aug 2019 18:07:27 +0200
Cornelia Huck <cohuck@redhat.com> wrote:

> On Wed,  7 Aug 2019 11:32:41 -0400
> Igor Mammedov <imammedo@redhat.com> wrote:
> 
> > Max memslot size supported by kvm on s390 is 8Tb,
> > move logic of splitting RAM in chunks upto 8T to KVM code.
> > 
> > This way it will hide KVM specific restrictions in KVM code
> > and won't affect baord level design decisions. Which would allow
> > us to avoid misusing memory_region_allocate_system_memory() API
> > and eventually use a single hostmem backend for guest RAM.
> > 
> > Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> > ---
> > v5:
> >   * move computation 'size -= slot_size' inside of loop body
> >           (David Hildenbrand <david@redhat.com>)
> > v4:
> >   * fix compilation issue
> >           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >   * advance HVA along with GPA in kvm_set_phys_mem()
> >           (Christian Borntraeger <borntraeger@de.ibm.com>)
> > 
> > patch prepares only KVM side for switching to single RAM memory region
> > another patch will take care of  dropping manual RAM partitioning in
> > s390 code.  
> 
> I may have lost track a bit -- what is the status of this patch (and
> the series)?

Christian,

could you test it on a host that has a sufficient amount of RAM?

PS:
I've only simulated the test on a smaller host with KVM enabled
(by reducing KVM_SLOT_MAX_BYTES) to verify that the KVM code splits
RAM into smaller chunks.
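
For reference, the simulation amounts to a local hack along these lines in
target/s390x/kvm.c (testing only, not for merging; 1GB is just an example
value, it merely has to stay host-page and 1MB-segment aligned):

/* testing hack, not for merging: shrink the limit so that splitting
 * kicks in even on a small host; must stay aligned to the host page
 * size (and to the 1MB segment boundary on s390) */
#define KVM_SLOT_MAX_BYTES (1024ULL * 1024 * 1024)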

PS2:
Also, considering we decided to drop the migration concerns, there
is no need to check migration on a large enough host anymore.


> 
> > ---
> >  include/sysemu/kvm_int.h   |  1 +
> >  accel/kvm/kvm-all.c        | 82 ++++++++++++++++++++++++--------------
> >  hw/s390x/s390-virtio-ccw.c |  9 -----
> >  target/s390x/kvm.c         | 12 ++++++
> >  4 files changed, 64 insertions(+), 40 deletions(-)
> > 
> > diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
> > index 31df465fdc..7f7520bce2 100644
> > --- a/include/sysemu/kvm_int.h
> > +++ b/include/sysemu/kvm_int.h
> > @@ -41,4 +41,5 @@ typedef struct KVMMemoryListener {
> >  void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
> >                                    AddressSpace *as, int as_id);
> >  
> > +void kvm_set_max_memslot_size(hwaddr max_slot_size);
> >  #endif
> > diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
> > index f450f25295..8153556335 100644
> > --- a/accel/kvm/kvm-all.c
> > +++ b/accel/kvm/kvm-all.c
> > @@ -138,6 +138,7 @@ bool kvm_direct_msi_allowed;
> >  bool kvm_ioeventfd_any_length_allowed;
> >  bool kvm_msi_use_devid;
> >  static bool kvm_immediate_exit;
> > +static hwaddr kvm_max_slot_size = ~0;
> >  
> >  static const KVMCapabilityInfo kvm_required_capabilites[] = {
> >      KVM_CAP_INFO(USER_MEMORY),
> > @@ -951,6 +952,14 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
> >      return NULL;
> >  }
> >  
> > +void kvm_set_max_memslot_size(hwaddr max_slot_size)
> > +{
> > +    g_assert(
> > +        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
> > +    );
> > +    kvm_max_slot_size = max_slot_size;
> > +}
> > +
> >  static void kvm_set_phys_mem(KVMMemoryListener *kml,
> >                               MemoryRegionSection *section, bool add)
> >  {
> > @@ -958,7 +967,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
> >      int err;
> >      MemoryRegion *mr = section->mr;
> >      bool writeable = !mr->readonly && !mr->rom_device;
> > -    hwaddr start_addr, size;
> > +    hwaddr start_addr, size, slot_size;
> >      void *ram;
> >  
> >      if (!memory_region_is_ram(mr)) {
> > @@ -983,41 +992,52 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
> >      kvm_slots_lock(kml);
> >  
> >      if (!add) {
> > -        mem = kvm_lookup_matching_slot(kml, start_addr, size);
> > -        if (!mem) {
> > -            goto out;
> > -        }
> > -        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
> > -            kvm_physical_sync_dirty_bitmap(kml, section);
> > -        }
> > +        do {
> > +            slot_size = kvm_max_slot_size < size ? kvm_max_slot_size : size;
> > +            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
> > +            if (!mem) {
> > +                goto out;
> > +            }
> > +            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
> > +                kvm_physical_sync_dirty_bitmap(kml, section);
> > +            }
> >  
> > -        /* unregister the slot */
> > -        g_free(mem->dirty_bmap);
> > -        mem->dirty_bmap = NULL;
> > -        mem->memory_size = 0;
> > -        mem->flags = 0;
> > -        err = kvm_set_user_memory_region(kml, mem, false);
> > -        if (err) {
> > -            fprintf(stderr, "%s: error unregistering slot: %s\n",
> > -                    __func__, strerror(-err));
> > -            abort();
> > -        }
> > +            /* unregister the slot */
> > +            g_free(mem->dirty_bmap);
> > +            mem->dirty_bmap = NULL;
> > +            mem->memory_size = 0;
> > +            mem->flags = 0;
> > +            err = kvm_set_user_memory_region(kml, mem, false);
> > +            if (err) {
> > +                fprintf(stderr, "%s: error unregistering slot: %s\n",
> > +                        __func__, strerror(-err));
> > +                abort();
> > +            }
> > +            start_addr += slot_size;
> > +            size -= slot_size;
> > +        } while (size);
> >          goto out;
> >      }
> >  
> >      /* register the new slot */
> > -    mem = kvm_alloc_slot(kml);
> > -    mem->memory_size = size;
> > -    mem->start_addr = start_addr;
> > -    mem->ram = ram;
> > -    mem->flags = kvm_mem_flags(mr);
> > -
> > -    err = kvm_set_user_memory_region(kml, mem, true);
> > -    if (err) {
> > -        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
> > -                strerror(-err));
> > -        abort();
> > -    }
> > +    do {
> > +        slot_size = kvm_max_slot_size < size ? kvm_max_slot_size : size;
> > +        mem = kvm_alloc_slot(kml);
> > +        mem->memory_size = slot_size;
> > +        mem->start_addr = start_addr;
> > +        mem->ram = ram;
> > +        mem->flags = kvm_mem_flags(mr);
> > +
> > +        err = kvm_set_user_memory_region(kml, mem, true);
> > +        if (err) {
> > +            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
> > +                    strerror(-err));
> > +            abort();
> > +        }
> > +        start_addr += slot_size;
> > +        ram += slot_size;
> > +        size -= slot_size;
> > +    } while (size);
> >  
> >  out:
> >      kvm_slots_unlock(kml);
> > diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
> > index 5b6a9a4e55..0c03ffb7c7 100644
> > --- a/hw/s390x/s390-virtio-ccw.c
> > +++ b/hw/s390x/s390-virtio-ccw.c
> > @@ -151,15 +151,6 @@ static void virtio_ccw_register_hcalls(void)
> >                                     virtio_ccw_hcall_early_printk);
> >  }
> >  
> > -/*
> > - * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
> > - * as the dirty bitmap must be managed by bitops that take an int as
> > - * position indicator. If we have a guest beyond that we will split off
> > - * new subregions. The split must happen on a segment boundary (1MB).
> > - */
> > -#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
> > -#define SEG_MSK (~0xfffffULL)
> > -#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
> >  static void s390_memory_init(ram_addr_t mem_size)
> >  {
> >      MemoryRegion *sysmem = get_system_memory();
> > diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
> > index 6e814c230b..6b1428a760 100644
> > --- a/target/s390x/kvm.c
> > +++ b/target/s390x/kvm.c
> > @@ -28,6 +28,7 @@
> >  #include "cpu.h"
> >  #include "internal.h"
> >  #include "kvm_s390x.h"
> > +#include "sysemu/kvm_int.h"
> >  #include "qapi/error.h"
> >  #include "qemu/error-report.h"
> >  #include "qemu/timer.h"
> > @@ -122,6 +123,16 @@
> >  #define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
> >                                       (max_cpus + NR_LOCAL_IRQS))
> >  
> > +/*
> > + * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
> > + * as the dirty bitmap must be managed by bitops that take an int as
> > + * position indicator. If we have a guest beyond that we will split off
> > + * new subregions. The split must happen on a segment boundary (1MB).
> > + */
> > +#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
> > +#define SEG_MSK (~0xfffffULL)
> > +#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
> > +
> >  static CPUWatchpoint hw_watchpoint;
> >  /*
> >   * We don't use a list because this structure is also used to transmit the
> > @@ -347,6 +358,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
> >       */
> >      /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */
> >  
> > +    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
> >      return 0;
> >  }
> >    
>
Christian Borntraeger Aug. 29, 2019, 6:47 a.m. UTC | #3
On 27.08.19 14:56, Igor Mammedov wrote:
> On Tue, 20 Aug 2019 18:07:27 +0200
> Cornelia Huck <cohuck@redhat.com> wrote:
> 
>> On Wed,  7 Aug 2019 11:32:41 -0400
>> Igor Mammedov <imammedo@redhat.com> wrote:
>>
>>> Max memslot size supported by kvm on s390 is 8Tb,
>>> move logic of splitting RAM in chunks upto 8T to KVM code.
>>>
>>> This way it will hide KVM specific restrictions in KVM code
>>> and won't affect baord level design decisions. Which would allow
>>> us to avoid misusing memory_region_allocate_system_memory() API
>>> and eventually use a single hostmem backend for guest RAM.
>>>
>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
>>> ---
>>> v5:
>>>   * move computation 'size -= slot_size' inside of loop body
>>>           (David Hildenbrand <david@redhat.com>)
>>> v4:
>>>   * fix compilation issue
>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>   * advance HVA along with GPA in kvm_set_phys_mem()
>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>
>>> patch prepares only KVM side for switching to single RAM memory region
>>> another patch will take care of  dropping manual RAM partitioning in
>>> s390 code.  
>>
>> I may have lost track a bit -- what is the status of this patch (and
>> the series)?
> 
> Christian,
> 
> could you test it on a host that have sufficient amount of RAM?


This version looks good. I was able to start a 9TB guest.
[pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
[pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0

The only question is whether we want to fix the weird alignment (0x7fffff00000) when
we already add a migration barrier for uber-large guests.
Maybe we could split at 4TB to avoid future problems with larger page sizes?
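
For reference, the "weird" boundary is just KVM_SLOT_MAX_BYTES from the patch:
(2^31 - 1) pages of 4KiB (the s390 TARGET_PAGE_SIZE) rounded down to a 1MB
segment gives 0x7fffff00000, so a 9TiB guest ends up as exactly the two slots
in the strace above. A quick standalone check (illustration only):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    const uint64_t max_pages = (1ULL << 31) - 1;   /* KVM_MEM_MAX_NR_PAGES */
    const uint64_t seg_msk = ~0xfffffULL;          /* 1MB segment mask */
    const uint64_t slot_max = (max_pages * 4096) & seg_msk;
    const uint64_t guest_ram = 9ULL << 40;         /* 9 TiB of guest RAM */

    printf("KVM_SLOT_MAX_BYTES = %" PRIu64 " (0x%" PRIx64 ")\n",
           slot_max, slot_max);
    printf("slot 0: size %" PRIu64 " at GPA 0x0\n", slot_max);
    printf("slot 1: size %" PRIu64 " at GPA 0x%" PRIx64 "\n",
           guest_ram - slot_max, slot_max);
    return 0;
}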
Igor Mammedov Aug. 29, 2019, 12:04 p.m. UTC | #4
On Thu, 29 Aug 2019 08:47:49 +0200
Christian Borntraeger <borntraeger@de.ibm.com> wrote:

> On 27.08.19 14:56, Igor Mammedov wrote:
> > On Tue, 20 Aug 2019 18:07:27 +0200
> > Cornelia Huck <cohuck@redhat.com> wrote:
> >   
> >> On Wed,  7 Aug 2019 11:32:41 -0400
> >> Igor Mammedov <imammedo@redhat.com> wrote:
> >>  
> >>> Max memslot size supported by kvm on s390 is 8Tb,
> >>> move logic of splitting RAM in chunks upto 8T to KVM code.
> >>>
> >>> This way it will hide KVM specific restrictions in KVM code
> >>> and won't affect baord level design decisions. Which would allow
> >>> us to avoid misusing memory_region_allocate_system_memory() API
> >>> and eventually use a single hostmem backend for guest RAM.
> >>>
> >>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> >>> ---
> >>> v5:
> >>>   * move computation 'size -= slot_size' inside of loop body
> >>>           (David Hildenbrand <david@redhat.com>)
> >>> v4:
> >>>   * fix compilation issue
> >>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>   * advance HVA along with GPA in kvm_set_phys_mem()
> >>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>
> >>> patch prepares only KVM side for switching to single RAM memory region
> >>> another patch will take care of  dropping manual RAM partitioning in
> >>> s390 code.    
> >>
> >> I may have lost track a bit -- what is the status of this patch (and
> >> the series)?  
> > 
> > Christian,
> > 
> > could you test it on a host that have sufficient amount of RAM?  
> 
> 
> This version looks good. I was able to start a 9TB guest.
> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0
>
> The only question is if we want to fix the weird alignment (0x7fffff00000) when
> we already add a migration barrier for uber-large guests.
> Maybe we could split at 4TB to avoid future problem with larger page sizes?
That probably should be a separate patch on top.
Christian Borntraeger Aug. 29, 2019, 12:07 p.m. UTC | #5
On 29.08.19 14:04, Igor Mammedov wrote:
> On Thu, 29 Aug 2019 08:47:49 +0200
> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> 
>> On 27.08.19 14:56, Igor Mammedov wrote:
>>> On Tue, 20 Aug 2019 18:07:27 +0200
>>> Cornelia Huck <cohuck@redhat.com> wrote:
>>>   
>>>> On Wed,  7 Aug 2019 11:32:41 -0400
>>>> Igor Mammedov <imammedo@redhat.com> wrote:
>>>>  
>>>>> Max memslot size supported by kvm on s390 is 8Tb,
>>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
>>>>>
>>>>> This way it will hide KVM specific restrictions in KVM code
>>>>> and won't affect baord level design decisions. Which would allow
>>>>> us to avoid misusing memory_region_allocate_system_memory() API
>>>>> and eventually use a single hostmem backend for guest RAM.
>>>>>
>>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
>>>>> ---
>>>>> v5:
>>>>>   * move computation 'size -= slot_size' inside of loop body
>>>>>           (David Hildenbrand <david@redhat.com>)
>>>>> v4:
>>>>>   * fix compilation issue
>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>
>>>>> patch prepares only KVM side for switching to single RAM memory region
>>>>> another patch will take care of  dropping manual RAM partitioning in
>>>>> s390 code.    
>>>>
>>>> I may have lost track a bit -- what is the status of this patch (and
>>>> the series)?  
>>>
>>> Christian,
>>>
>>> could you test it on a host that have sufficient amount of RAM?  
>>
>>
>> This version looks good. I was able to start a 9TB guest.
>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0
>>
>> The only question is if we want to fix the weird alignment (0x7fffff00000) when
>> we already add a migration barrier for uber-large guests.
>> Maybe we could split at 4TB to avoid future problem with larger page sizes?
> That probably should be a separate patch on top.

Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?
Igor Mammedov Aug. 29, 2019, 12:31 p.m. UTC | #6
On Thu, 29 Aug 2019 14:07:44 +0200
Christian Borntraeger <borntraeger@de.ibm.com> wrote:

> On 29.08.19 14:04, Igor Mammedov wrote:
> > On Thu, 29 Aug 2019 08:47:49 +0200
> > Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >   
> >> On 27.08.19 14:56, Igor Mammedov wrote:  
> >>> On Tue, 20 Aug 2019 18:07:27 +0200
> >>> Cornelia Huck <cohuck@redhat.com> wrote:
> >>>     
> >>>> On Wed,  7 Aug 2019 11:32:41 -0400
> >>>> Igor Mammedov <imammedo@redhat.com> wrote:
> >>>>    
> >>>>> Max memslot size supported by kvm on s390 is 8Tb,
> >>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
> >>>>>
> >>>>> This way it will hide KVM specific restrictions in KVM code
> >>>>> and won't affect baord level design decisions. Which would allow
> >>>>> us to avoid misusing memory_region_allocate_system_memory() API
> >>>>> and eventually use a single hostmem backend for guest RAM.
> >>>>>
> >>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> >>>>> ---
> >>>>> v5:
> >>>>>   * move computation 'size -= slot_size' inside of loop body
> >>>>>           (David Hildenbrand <david@redhat.com>)
> >>>>> v4:
> >>>>>   * fix compilation issue
> >>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
> >>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>
> >>>>> patch prepares only KVM side for switching to single RAM memory region
> >>>>> another patch will take care of  dropping manual RAM partitioning in
> >>>>> s390 code.      
> >>>>
> >>>> I may have lost track a bit -- what is the status of this patch (and
> >>>> the series)?    
> >>>
> >>> Christian,
> >>>
> >>> could you test it on a host that have sufficient amount of RAM?    
> >>
> >>
> >> This version looks good. I was able to start a 9TB guest.
> >> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
> >> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0
>
> >> The only question is if we want to fix the weird alignment (0x7fffff00000) when
> >> we already add a migration barrier for uber-large guests.
> >> Maybe we could split at 4TB to avoid future problem with larger page sizes?  
> > That probably should be a separate patch on top.  
> 
> Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?

It should not affect other QEMU parts or migration (to my limited understanding of it);
we are still passing memory slots of up to KVM_SLOT_MAX_BYTES to KVM, just as we did
before by creating several memory regions instead of one, as described in the [2/2]
commit message.

Could you also test migration of a 9TB+ guest, to check that nothing was broken by
accident in the QEMU migration code?
Christian Borntraeger Aug. 29, 2019, 12:41 p.m. UTC | #7
On 29.08.19 14:31, Igor Mammedov wrote:
> On Thu, 29 Aug 2019 14:07:44 +0200
> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> 
>> On 29.08.19 14:04, Igor Mammedov wrote:
>>> On Thu, 29 Aug 2019 08:47:49 +0200
>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
>>>   
>>>> On 27.08.19 14:56, Igor Mammedov wrote:  
>>>>> On Tue, 20 Aug 2019 18:07:27 +0200
>>>>> Cornelia Huck <cohuck@redhat.com> wrote:
>>>>>     
>>>>>> On Wed,  7 Aug 2019 11:32:41 -0400
>>>>>> Igor Mammedov <imammedo@redhat.com> wrote:
>>>>>>    
>>>>>>> Max memslot size supported by kvm on s390 is 8Tb,
>>>>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
>>>>>>>
>>>>>>> This way it will hide KVM specific restrictions in KVM code
>>>>>>> and won't affect baord level design decisions. Which would allow
>>>>>>> us to avoid misusing memory_region_allocate_system_memory() API
>>>>>>> and eventually use a single hostmem backend for guest RAM.
>>>>>>>
>>>>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
>>>>>>> ---
>>>>>>> v5:
>>>>>>>   * move computation 'size -= slot_size' inside of loop body
>>>>>>>           (David Hildenbrand <david@redhat.com>)
>>>>>>> v4:
>>>>>>>   * fix compilation issue
>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>>>
>>>>>>> patch prepares only KVM side for switching to single RAM memory region
>>>>>>> another patch will take care of  dropping manual RAM partitioning in
>>>>>>> s390 code.      
>>>>>>
>>>>>> I may have lost track a bit -- what is the status of this patch (and
>>>>>> the series)?    
>>>>>
>>>>> Christian,
>>>>>
>>>>> could you test it on a host that have sufficient amount of RAM?    
>>>>
>>>>
>>>> This version looks good. I was able to start a 9TB guest.
>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0
>>
>>>> The only question is if we want to fix the weird alignment (0x7fffff00000) when
>>>> we already add a migration barrier for uber-large guests.
>>>> Maybe we could split at 4TB to avoid future problem with larger page sizes?  
>>> That probably should be a separate patch on top.  
>>
>> Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?
> 
> it should not affect other QEMU parts and migration (to my limited understanding of it),
> we are passing to KVM memory slots upto KVM_SLOT_MAX_BYTES as we were doing before by
> creating several memory regions instead of one as described in [2/2] commit message.
> 
> Also could you also test migration of +9Tb guest, to check that nothing where broken by
> accident in QEMU migration code?

I only have one server that is large enough :-/
Igor Mammedov Aug. 30, 2019, 9:41 a.m. UTC | #8
On Thu, 29 Aug 2019 14:41:13 +0200
Christian Borntraeger <borntraeger@de.ibm.com> wrote:

> On 29.08.19 14:31, Igor Mammedov wrote:
> > On Thu, 29 Aug 2019 14:07:44 +0200
> > Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >   
> >> On 29.08.19 14:04, Igor Mammedov wrote:  
> >>> On Thu, 29 Aug 2019 08:47:49 +0200
> >>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >>>     
> >>>> On 27.08.19 14:56, Igor Mammedov wrote:    
> >>>>> On Tue, 20 Aug 2019 18:07:27 +0200
> >>>>> Cornelia Huck <cohuck@redhat.com> wrote:
> >>>>>       
> >>>>>> On Wed,  7 Aug 2019 11:32:41 -0400
> >>>>>> Igor Mammedov <imammedo@redhat.com> wrote:
> >>>>>>      
> >>>>>>> Max memslot size supported by kvm on s390 is 8Tb,
> >>>>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
> >>>>>>>
> >>>>>>> This way it will hide KVM specific restrictions in KVM code
> >>>>>>> and won't affect baord level design decisions. Which would allow
> >>>>>>> us to avoid misusing memory_region_allocate_system_memory() API
> >>>>>>> and eventually use a single hostmem backend for guest RAM.
> >>>>>>>
> >>>>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> >>>>>>> ---
> >>>>>>> v5:
> >>>>>>>   * move computation 'size -= slot_size' inside of loop body
> >>>>>>>           (David Hildenbrand <david@redhat.com>)
> >>>>>>> v4:
> >>>>>>>   * fix compilation issue
> >>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
> >>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>>>
> >>>>>>> patch prepares only KVM side for switching to single RAM memory region
> >>>>>>> another patch will take care of  dropping manual RAM partitioning in
> >>>>>>> s390 code.        
> >>>>>>
> >>>>>> I may have lost track a bit -- what is the status of this patch (and
> >>>>>> the series)?      
> >>>>>
> >>>>> Christian,
> >>>>>
> >>>>> could you test it on a host that have sufficient amount of RAM?      
> >>>>
> >>>>
> >>>> This version looks good. I was able to start a 9TB guest.
> >>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
> >>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0  
> >>  
> >>>> The only question is if we want to fix the weird alignment (0x7fffff00000) when
> >>>> we already add a migration barrier for uber-large guests.
> >>>> Maybe we could split at 4TB to avoid future problem with larger page sizes?    
> >>> That probably should be a separate patch on top.    
> >>
> >> Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?  
> > 
> > it should not affect other QEMU parts and migration (to my limited understanding of it),
> > we are passing to KVM memory slots upto KVM_SLOT_MAX_BYTES as we were doing before by
> > creating several memory regions instead of one as described in [2/2] commit message.
> > 
> > Also could you also test migration of +9Tb guest, to check that nothing where broken by
> > accident in QEMU migration code?  
> 
> I only have one server that is large enough :-/
Could you test offline migration on it (to a file and restore from it)?
Christian Borntraeger Aug. 30, 2019, 4:19 p.m. UTC | #9
On 30.08.19 11:41, Igor Mammedov wrote:
> On Thu, 29 Aug 2019 14:41:13 +0200
> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> 
>> On 29.08.19 14:31, Igor Mammedov wrote:
>>> On Thu, 29 Aug 2019 14:07:44 +0200
>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
>>>   
>>>> On 29.08.19 14:04, Igor Mammedov wrote:  
>>>>> On Thu, 29 Aug 2019 08:47:49 +0200
>>>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
>>>>>     
>>>>>> On 27.08.19 14:56, Igor Mammedov wrote:    
>>>>>>> On Tue, 20 Aug 2019 18:07:27 +0200
>>>>>>> Cornelia Huck <cohuck@redhat.com> wrote:
>>>>>>>       
>>>>>>>> On Wed,  7 Aug 2019 11:32:41 -0400
>>>>>>>> Igor Mammedov <imammedo@redhat.com> wrote:
>>>>>>>>      
>>>>>>>>> Max memslot size supported by kvm on s390 is 8Tb,
>>>>>>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
>>>>>>>>>
>>>>>>>>> This way it will hide KVM specific restrictions in KVM code
>>>>>>>>> and won't affect baord level design decisions. Which would allow
>>>>>>>>> us to avoid misusing memory_region_allocate_system_memory() API
>>>>>>>>> and eventually use a single hostmem backend for guest RAM.
>>>>>>>>>
>>>>>>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
>>>>>>>>> ---
>>>>>>>>> v5:
>>>>>>>>>   * move computation 'size -= slot_size' inside of loop body
>>>>>>>>>           (David Hildenbrand <david@redhat.com>)
>>>>>>>>> v4:
>>>>>>>>>   * fix compilation issue
>>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
>>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>>>>>
>>>>>>>>> patch prepares only KVM side for switching to single RAM memory region
>>>>>>>>> another patch will take care of  dropping manual RAM partitioning in
>>>>>>>>> s390 code.        
>>>>>>>>
>>>>>>>> I may have lost track a bit -- what is the status of this patch (and
>>>>>>>> the series)?      
>>>>>>>
>>>>>>> Christian,
>>>>>>>
>>>>>>> could you test it on a host that have sufficient amount of RAM?      
>>>>>>
>>>>>>
>>>>>> This version looks good. I was able to start a 9TB guest.
>>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
>>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0  
>>>>  
>>>>>> The only question is if we want to fix the weird alignment (0x7fffff00000) when
>>>>>> we already add a migration barrier for uber-large guests.
>>>>>> Maybe we could split at 4TB to avoid future problem with larger page sizes?    
>>>>> That probably should be a separate patch on top.    
>>>>
>>>> Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?  
>>>
>>> it should not affect other QEMU parts and migration (to my limited understanding of it),
>>> we are passing to KVM memory slots upto KVM_SLOT_MAX_BYTES as we were doing before by
>>> creating several memory regions instead of one as described in [2/2] commit message.
>>>
>>> Also could you also test migration of +9Tb guest, to check that nothing where broken by
>>> accident in QEMU migration code?  
>>
>> I only have one server that is large enough :-/
> Could you test offline migration on it (to a file and restore from it)?

I tested migration with a hacked QEMU (basically splitting in the KVM code at 1GB
instead of 8TB), and the restore from the file failed with data corruption in the
guest. The current code does work when I use small memslots. No idea yet what is wrong.
Igor Mammedov Sept. 2, 2019, 1:49 p.m. UTC | #10
On Fri, 30 Aug 2019 18:19:29 +0200
Christian Borntraeger <borntraeger@de.ibm.com> wrote:

> On 30.08.19 11:41, Igor Mammedov wrote:
> > On Thu, 29 Aug 2019 14:41:13 +0200
> > Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >   
> >> On 29.08.19 14:31, Igor Mammedov wrote:  
> >>> On Thu, 29 Aug 2019 14:07:44 +0200
> >>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >>>     
> >>>> On 29.08.19 14:04, Igor Mammedov wrote:    
> >>>>> On Thu, 29 Aug 2019 08:47:49 +0200
> >>>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >>>>>       
> >>>>>> On 27.08.19 14:56, Igor Mammedov wrote:      
> >>>>>>> On Tue, 20 Aug 2019 18:07:27 +0200
> >>>>>>> Cornelia Huck <cohuck@redhat.com> wrote:
> >>>>>>>         
> >>>>>>>> On Wed,  7 Aug 2019 11:32:41 -0400
> >>>>>>>> Igor Mammedov <imammedo@redhat.com> wrote:
> >>>>>>>>        
> >>>>>>>>> Max memslot size supported by kvm on s390 is 8Tb,
> >>>>>>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
> >>>>>>>>>
> >>>>>>>>> This way it will hide KVM specific restrictions in KVM code
> >>>>>>>>> and won't affect baord level design decisions. Which would allow
> >>>>>>>>> us to avoid misusing memory_region_allocate_system_memory() API
> >>>>>>>>> and eventually use a single hostmem backend for guest RAM.
> >>>>>>>>>
> >>>>>>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> >>>>>>>>> ---
> >>>>>>>>> v5:
> >>>>>>>>>   * move computation 'size -= slot_size' inside of loop body
> >>>>>>>>>           (David Hildenbrand <david@redhat.com>)
> >>>>>>>>> v4:
> >>>>>>>>>   * fix compilation issue
> >>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
> >>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>>>>>
> >>>>>>>>> patch prepares only KVM side for switching to single RAM memory region
> >>>>>>>>> another patch will take care of  dropping manual RAM partitioning in
> >>>>>>>>> s390 code.          
> >>>>>>>>
> >>>>>>>> I may have lost track a bit -- what is the status of this patch (and
> >>>>>>>> the series)?        
> >>>>>>>
> >>>>>>> Christian,
> >>>>>>>
> >>>>>>> could you test it on a host that have sufficient amount of RAM?        
> >>>>>>
> >>>>>>
> >>>>>> This version looks good. I was able to start a 9TB guest.
> >>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
> >>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0    
> >>>>    
> >>>>>> The only question is if we want to fix the weird alignment (0x7fffff00000) when
> >>>>>> we already add a migration barrier for uber-large guests.
> >>>>>> Maybe we could split at 4TB to avoid future problem with larger page sizes?      
> >>>>> That probably should be a separate patch on top.      
> >>>>
> >>>> Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?    
> >>>
> >>> it should not affect other QEMU parts and migration (to my limited understanding of it),
> >>> we are passing to KVM memory slots upto KVM_SLOT_MAX_BYTES as we were doing before by
> >>> creating several memory regions instead of one as described in [2/2] commit message.
> >>>
> >>> Also could you also test migration of +9Tb guest, to check that nothing where broken by
> >>> accident in QEMU migration code?    
> >>
> >> I only have one server that is large enough :-/  
> > Could you test offline migration on it (to a file and restore from it)?  
> 
> I tested migration with a hacked QEMU (basically split in KVM code at 1GB instead of 8TB) and
> the restore from file failed with data corruption in the guest. The current code
> does work when I use small memslots. No idea yet what is wrong.

I've tested a 2GB guest (the maximum I can test, also with a hacked-up version)
and it worked for me.
How do you test it and detect the corruption, so I can try to reproduce it locally?
(Given it worked before, there is not much hope, but I could try.)
Christian Borntraeger Sept. 3, 2019, 6:57 a.m. UTC | #11
On 02.09.19 15:49, Igor Mammedov wrote:
> On Fri, 30 Aug 2019 18:19:29 +0200
> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> 
>> On 30.08.19 11:41, Igor Mammedov wrote:
>>> On Thu, 29 Aug 2019 14:41:13 +0200
>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
>>>   
>>>> On 29.08.19 14:31, Igor Mammedov wrote:  
>>>>> On Thu, 29 Aug 2019 14:07:44 +0200
>>>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
>>>>>     
>>>>>> On 29.08.19 14:04, Igor Mammedov wrote:    
>>>>>>> On Thu, 29 Aug 2019 08:47:49 +0200
>>>>>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
>>>>>>>       
>>>>>>>> On 27.08.19 14:56, Igor Mammedov wrote:      
>>>>>>>>> On Tue, 20 Aug 2019 18:07:27 +0200
>>>>>>>>> Cornelia Huck <cohuck@redhat.com> wrote:
>>>>>>>>>         
>>>>>>>>>> On Wed,  7 Aug 2019 11:32:41 -0400
>>>>>>>>>> Igor Mammedov <imammedo@redhat.com> wrote:
>>>>>>>>>>        
>>>>>>>>>>> Max memslot size supported by kvm on s390 is 8Tb,
>>>>>>>>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
>>>>>>>>>>>
>>>>>>>>>>> This way it will hide KVM specific restrictions in KVM code
>>>>>>>>>>> and won't affect baord level design decisions. Which would allow
>>>>>>>>>>> us to avoid misusing memory_region_allocate_system_memory() API
>>>>>>>>>>> and eventually use a single hostmem backend for guest RAM.
>>>>>>>>>>>
>>>>>>>>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
>>>>>>>>>>> ---
>>>>>>>>>>> v5:
>>>>>>>>>>>   * move computation 'size -= slot_size' inside of loop body
>>>>>>>>>>>           (David Hildenbrand <david@redhat.com>)
>>>>>>>>>>> v4:
>>>>>>>>>>>   * fix compilation issue
>>>>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>>>>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
>>>>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
>>>>>>>>>>>
>>>>>>>>>>> patch prepares only KVM side for switching to single RAM memory region
>>>>>>>>>>> another patch will take care of  dropping manual RAM partitioning in
>>>>>>>>>>> s390 code.          
>>>>>>>>>>
>>>>>>>>>> I may have lost track a bit -- what is the status of this patch (and
>>>>>>>>>> the series)?        
>>>>>>>>>
>>>>>>>>> Christian,
>>>>>>>>>
>>>>>>>>> could you test it on a host that have sufficient amount of RAM?        
>>>>>>>>
>>>>>>>>
>>>>>>>> This version looks good. I was able to start a 9TB guest.
>>>>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
>>>>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0    
>>>>>>    
>>>>>>>> The only question is if we want to fix the weird alignment (0x7fffff00000) when
>>>>>>>> we already add a migration barrier for uber-large guests.
>>>>>>>> Maybe we could split at 4TB to avoid future problem with larger page sizes?      
>>>>>>> That probably should be a separate patch on top.      
>>>>>>
>>>>>> Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?    
>>>>>
>>>>> it should not affect other QEMU parts and migration (to my limited understanding of it),
>>>>> we are passing to KVM memory slots upto KVM_SLOT_MAX_BYTES as we were doing before by
>>>>> creating several memory regions instead of one as described in [2/2] commit message.
>>>>>
>>>>> Also could you also test migration of +9Tb guest, to check that nothing where broken by
>>>>> accident in QEMU migration code?    
>>>>
>>>> I only have one server that is large enough :-/  
>>> Could you test offline migration on it (to a file and restore from it)?  
>>
>> I tested migration with a hacked QEMU (basically split in KVM code at 1GB instead of 8TB) and
>> the restore from file failed with data corruption in the guest. The current code
>> does work when I use small memslots. No idea yet what is wrong.
> 
> I've tested 2Gb (max, I can test) guest (also hacked up version)
> and it worked for me.
> How do you test it and detect corruption so I could try to reproduce it locally?
> (given it worked before, there is no much hope but I could try)

I basically started a guest with just a kernel and ramdisk on the command line, and
then in the monitor I did
migrate "exec: cat > savefile"
and then I restarted the guest with
-incoming "exec: cat savefile"

The guest then very quickly crashed with random kernel oopses.

Using libvirt's managedsave should work as well.
Igor Mammedov Sept. 16, 2019, 1:16 p.m. UTC | #12
On Tue, 3 Sep 2019 08:57:38 +0200
Christian Borntraeger <borntraeger@de.ibm.com> wrote:

> On 02.09.19 15:49, Igor Mammedov wrote:
> > On Fri, 30 Aug 2019 18:19:29 +0200
> > Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >   
> >> On 30.08.19 11:41, Igor Mammedov wrote:  
> >>> On Thu, 29 Aug 2019 14:41:13 +0200
> >>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >>>     
> >>>> On 29.08.19 14:31, Igor Mammedov wrote:    
> >>>>> On Thu, 29 Aug 2019 14:07:44 +0200
> >>>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >>>>>       
> >>>>>> On 29.08.19 14:04, Igor Mammedov wrote:      
> >>>>>>> On Thu, 29 Aug 2019 08:47:49 +0200
> >>>>>>> Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> >>>>>>>         
> >>>>>>>> On 27.08.19 14:56, Igor Mammedov wrote:        
> >>>>>>>>> On Tue, 20 Aug 2019 18:07:27 +0200
> >>>>>>>>> Cornelia Huck <cohuck@redhat.com> wrote:
> >>>>>>>>>           
> >>>>>>>>>> On Wed,  7 Aug 2019 11:32:41 -0400
> >>>>>>>>>> Igor Mammedov <imammedo@redhat.com> wrote:
> >>>>>>>>>>          
> >>>>>>>>>>> Max memslot size supported by kvm on s390 is 8Tb,
> >>>>>>>>>>> move logic of splitting RAM in chunks upto 8T to KVM code.
> >>>>>>>>>>>
> >>>>>>>>>>> This way it will hide KVM specific restrictions in KVM code
> >>>>>>>>>>> and won't affect baord level design decisions. Which would allow
> >>>>>>>>>>> us to avoid misusing memory_region_allocate_system_memory() API
> >>>>>>>>>>> and eventually use a single hostmem backend for guest RAM.
> >>>>>>>>>>>
> >>>>>>>>>>> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> >>>>>>>>>>> ---
> >>>>>>>>>>> v5:
> >>>>>>>>>>>   * move computation 'size -= slot_size' inside of loop body
> >>>>>>>>>>>           (David Hildenbrand <david@redhat.com>)
> >>>>>>>>>>> v4:
> >>>>>>>>>>>   * fix compilation issue
> >>>>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>>>>>>>   * advance HVA along with GPA in kvm_set_phys_mem()
> >>>>>>>>>>>           (Christian Borntraeger <borntraeger@de.ibm.com>)
> >>>>>>>>>>>
> >>>>>>>>>>> patch prepares only KVM side for switching to single RAM memory region
> >>>>>>>>>>> another patch will take care of  dropping manual RAM partitioning in
> >>>>>>>>>>> s390 code.            
> >>>>>>>>>>
> >>>>>>>>>> I may have lost track a bit -- what is the status of this patch (and
> >>>>>>>>>> the series)?          
> >>>>>>>>>
> >>>>>>>>> Christian,
> >>>>>>>>>
> >>>>>>>>> could you test it on a host that have sufficient amount of RAM?          
> >>>>>>>>
> >>>>>>>>
> >>>>>>>> This version looks good. I was able to start a 9TB guest.
> >>>>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=0, flags=0, guest_phys_addr=0, memory_size=8796091973632, userspace_addr=0x3ffee700000}) = 0
> >>>>>>>> [pid 215723] ioctl(10, KVM_SET_USER_MEMORY_REGION, {slot=1, flags=0, guest_phys_addr=0x7fffff00000, memory_size=1099512676352, userspace_addr=0xbffee600000}) = 0      
> >>>>>>      
> >>>>>>>> The only question is if we want to fix the weird alignment (0x7fffff00000) when
> >>>>>>>> we already add a migration barrier for uber-large guests.
> >>>>>>>> Maybe we could split at 4TB to avoid future problem with larger page sizes?        
> >>>>>>> That probably should be a separate patch on top.        
> >>>>>>
> >>>>>> Right. The split in KVM code is transparent to migration and other parts of QEMU, correct?      
> >>>>>
> >>>>> it should not affect other QEMU parts and migration (to my limited understanding of it),
> >>>>> we are passing to KVM memory slots upto KVM_SLOT_MAX_BYTES as we were doing before by
> >>>>> creating several memory regions instead of one as described in [2/2] commit message.
> >>>>>
> >>>>> Also could you also test migration of +9Tb guest, to check that nothing where broken by
> >>>>> accident in QEMU migration code?      
> >>>>
> >>>> I only have one server that is large enough :-/    
> >>> Could you test offline migration on it (to a file and restore from it)?    
> >>
> >> I tested migration with a hacked QEMU (basically split in KVM code at 1GB instead of 8TB) and
> >> the restore from file failed with data corruption in the guest. The current code
> >> does work when I use small memslots. No idea yet what is wrong.  
> > 
> > I've tested 2Gb (max, I can test) guest (also hacked up version)
> > and it worked for me.
> > How do you test it and detect corruption so I could try to reproduce it locally?
> > (given it worked before, there is no much hope but I could try)  
> 
> I basically started a guest with just kernel and ramdisk on the command line and
> then in the monitor I did 
> migrate "exec: cat > savefile"
> and then I restarted the guest with
> -incoming "exec: cat savefile"
> 
> the guest then very quickly crashed with random kernel oopses. 
Well, I wasn't able to reproduce that. So I've looked at the KVM part of
migration, and it turned out that migration didn't even start as far as KVM
was concerned.

The issue was that the migration-related KVM code assumed a 1:1
relation between MemoryRegionSection and memslot, which isn't true anymore.
I'll respin v6 with that fixed (i.e. make sure that the KVM parts can handle
the 1:n MemoryRegionSection:memslot relation).
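
Concretely, any KVM code path that used to look up the single slot backing a
section now has to walk all slots covering it. A rough sketch of that walk
(hypothetical helper, not the actual v6 change, and ignoring the page-alignment
handling the real code does):

static void kvm_section_for_each_slot(KVMMemoryListener *kml,
                                      MemoryRegionSection *section,
                                      void (*fn)(KVMSlot *mem))
{
    hwaddr start = section->offset_within_address_space;
    hwaddr size = int128_get64(section->size);

    while (size) {
        hwaddr slot_size = MIN(kvm_max_slot_size, size);
        KVMSlot *mem = kvm_lookup_matching_slot(kml, start, slot_size);

        if (!mem) {
            return;     /* (rest of the) section is not mapped to KVM */
        }
        fn(mem);        /* e.g. sync this slot's dirty bitmap */
        start += slot_size;
        size -= slot_size;
    }
}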

PS:
I've tested ping-pong migration (via a file, using the 1GB-segments hack) with
Fedora 29 in a boot loop.
 
> Using libvirts managedsave should work as well. 
>

Patch

diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index 31df465fdc..7f7520bce2 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -41,4 +41,5 @@  typedef struct KVMMemoryListener {
 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                   AddressSpace *as, int as_id);
 
+void kvm_set_max_memslot_size(hwaddr max_slot_size);
 #endif
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index f450f25295..8153556335 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -138,6 +138,7 @@  bool kvm_direct_msi_allowed;
 bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;
 static bool kvm_immediate_exit;
+static hwaddr kvm_max_slot_size = ~0;
 
 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),
@@ -951,6 +952,14 @@  kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
     return NULL;
 }
 
+void kvm_set_max_memslot_size(hwaddr max_slot_size)
+{
+    g_assert(
+        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
+    );
+    kvm_max_slot_size = max_slot_size;
+}
+
 static void kvm_set_phys_mem(KVMMemoryListener *kml,
                              MemoryRegionSection *section, bool add)
 {
@@ -958,7 +967,7 @@  static void kvm_set_phys_mem(KVMMemoryListener *kml,
     int err;
     MemoryRegion *mr = section->mr;
     bool writeable = !mr->readonly && !mr->rom_device;
-    hwaddr start_addr, size;
+    hwaddr start_addr, size, slot_size;
     void *ram;
 
     if (!memory_region_is_ram(mr)) {
@@ -983,41 +992,52 @@  static void kvm_set_phys_mem(KVMMemoryListener *kml,
     kvm_slots_lock(kml);
 
     if (!add) {
-        mem = kvm_lookup_matching_slot(kml, start_addr, size);
-        if (!mem) {
-            goto out;
-        }
-        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
-            kvm_physical_sync_dirty_bitmap(kml, section);
-        }
+        do {
+            slot_size = kvm_max_slot_size < size ? kvm_max_slot_size : size;
+            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
+            if (!mem) {
+                goto out;
+            }
+            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+                kvm_physical_sync_dirty_bitmap(kml, section);
+            }
 
-        /* unregister the slot */
-        g_free(mem->dirty_bmap);
-        mem->dirty_bmap = NULL;
-        mem->memory_size = 0;
-        mem->flags = 0;
-        err = kvm_set_user_memory_region(kml, mem, false);
-        if (err) {
-            fprintf(stderr, "%s: error unregistering slot: %s\n",
-                    __func__, strerror(-err));
-            abort();
-        }
+            /* unregister the slot */
+            g_free(mem->dirty_bmap);
+            mem->dirty_bmap = NULL;
+            mem->memory_size = 0;
+            mem->flags = 0;
+            err = kvm_set_user_memory_region(kml, mem, false);
+            if (err) {
+                fprintf(stderr, "%s: error unregistering slot: %s\n",
+                        __func__, strerror(-err));
+                abort();
+            }
+            start_addr += slot_size;
+            size -= slot_size;
+        } while (size);
         goto out;
     }
 
     /* register the new slot */
-    mem = kvm_alloc_slot(kml);
-    mem->memory_size = size;
-    mem->start_addr = start_addr;
-    mem->ram = ram;
-    mem->flags = kvm_mem_flags(mr);
-
-    err = kvm_set_user_memory_region(kml, mem, true);
-    if (err) {
-        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
-                strerror(-err));
-        abort();
-    }
+    do {
+        slot_size = kvm_max_slot_size < size ? kvm_max_slot_size : size;
+        mem = kvm_alloc_slot(kml);
+        mem->memory_size = slot_size;
+        mem->start_addr = start_addr;
+        mem->ram = ram;
+        mem->flags = kvm_mem_flags(mr);
+
+        err = kvm_set_user_memory_region(kml, mem, true);
+        if (err) {
+            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
+                    strerror(-err));
+            abort();
+        }
+        start_addr += slot_size;
+        ram += slot_size;
+        size -= slot_size;
+    } while (size);
 
 out:
     kvm_slots_unlock(kml);
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 5b6a9a4e55..0c03ffb7c7 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -151,15 +151,6 @@  static void virtio_ccw_register_hcalls(void)
                                    virtio_ccw_hcall_early_printk);
 }
 
-/*
- * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
- * as the dirty bitmap must be managed by bitops that take an int as
- * position indicator. If we have a guest beyond that we will split off
- * new subregions. The split must happen on a segment boundary (1MB).
- */
-#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
-#define SEG_MSK (~0xfffffULL)
-#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
 static void s390_memory_init(ram_addr_t mem_size)
 {
     MemoryRegion *sysmem = get_system_memory();
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 6e814c230b..6b1428a760 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -28,6 +28,7 @@ 
 #include "cpu.h"
 #include "internal.h"
 #include "kvm_s390x.h"
+#include "sysemu/kvm_int.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "qemu/timer.h"
@@ -122,6 +123,16 @@ 
 #define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                      (max_cpus + NR_LOCAL_IRQS))
 
+/*
+ * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
+ * as the dirty bitmap must be managed by bitops that take an int as
+ * position indicator. If we have a guest beyond that we will split off
+ * new subregions. The split must happen on a segment boundary (1MB).
+ */
+#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
+#define SEG_MSK (~0xfffffULL)
+#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
+
 static CPUWatchpoint hw_watchpoint;
 /*
  * We don't use a list because this structure is also used to transmit the
@@ -347,6 +358,7 @@  int kvm_arch_init(MachineState *ms, KVMState *s)
      */
     /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */
 
+    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
     return 0;
 }