
[v1,1/2] KVM: s390x: some utility functions for migration

Message ID 1516035791-10609-2-git-send-email-imbrenda@linux.vnet.ibm.com (mailing list archive)
State New, archived

Commit Message

Claudio Imbrenda Jan. 15, 2018, 5:03 p.m. UTC
These are some utility functions that will be used later on for storage
attributes migration.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
---
 arch/s390/kvm/kvm-s390.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

Comments

David Hildenbrand Jan. 16, 2018, 6:03 p.m. UTC | #1
On 15.01.2018 18:03, Claudio Imbrenda wrote:
> These are some utility functions that will be used later on for storage
> attributes migration.
> 
> Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
> ---
>  arch/s390/kvm/kvm-s390.c | 40 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 40 insertions(+)
> 
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 6f17031..100ea15 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -764,6 +764,14 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
>  		kvm_s390_sync_request(req, vcpu);
>  }
>  
> +static inline unsigned long *_cmma_bitmap(struct kvm_memory_slot *ms)

I think you can get rid of the "_" here. And usually we use two _ ?

> +{
> +	unsigned long long len;
> +
> +	len = kvm_dirty_bitmap_bytes(ms) / sizeof(*ms->dirty_bitmap);

return (void *) ms->dirty_bitmap + kvm_dirty_bitmap_bytes(ms);

?

> +	return ms->dirty_bitmap + len;
> +}


> +
>  /*
>   * Must be called with kvm->srcu held to avoid races on memslots, and with
>   * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
> @@ -1512,6 +1520,38 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
>  #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
>  
>  /*
> + * Similar to gfn_to_memslot, but returns a memslot also when the address falls
> + * in a hole. In that case a memslot near the hole is returned.
> + */
> +static int gfn_to_memslot_approx(struct kvm *kvm, gfn_t gfn)
> +{
> +	struct kvm_memslots *slots = kvm_memslots(kvm);
> +	int start = 0, end = slots->used_slots;
> +	int slot = atomic_read(&slots->lru_slot);
> +	struct kvm_memory_slot *memslots = slots->memslots;
> +
> +	if (gfn >= memslots[slot].base_gfn &&
> +	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
> +		return slot;
> +
> +	while (start < end) {
> +		slot = start + (end - start) / 2;
> +
> +		if (gfn >= memslots[slot].base_gfn)
> +			end = slot;
> +		else
> +			start = slot + 1;
> +	}
> +
> +	if (gfn >= memslots[start].base_gfn &&
> +	    gfn < memslots[start].base_gfn + memslots[start].npages) {
> +		atomic_set(&slots->lru_slot, start);
> +	}
> +
> +	return start;
> +}

This looks ugly, hope we can avoid this ....

> +
> +/*
>   * This function searches for the next page with dirty CMMA attributes, and
>   * saves the attributes in the buffer up to either the end of the buffer or
>   * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
>
Claudio Imbrenda Jan. 17, 2018, 9:50 a.m. UTC | #2
On Tue, 16 Jan 2018 19:03:00 +0100
David Hildenbrand <david@redhat.com> wrote:

> On 15.01.2018 18:03, Claudio Imbrenda wrote:
> > These are some utility functions that will be used later on for
> > storage attributes migration.
> > 
> > Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
> > ---
> >  arch/s390/kvm/kvm-s390.c | 40 ++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 40 insertions(+)
> > 
> > diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> > index 6f17031..100ea15 100644
> > --- a/arch/s390/kvm/kvm-s390.c
> > +++ b/arch/s390/kvm/kvm-s390.c
> > @@ -764,6 +764,14 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
> >  		kvm_s390_sync_request(req, vcpu);
> >  }
> >  
> > +static inline unsigned long *_cmma_bitmap(struct kvm_memory_slot *ms)
> 
> I think you can get rid of the "_" here. And usually we use two _ ?

will fix

> > +{
> > +	unsigned long long len;
> > +
> > +	len = kvm_dirty_bitmap_bytes(ms) / sizeof(*ms->dirty_bitmap);
> 
> return (void *) ms->dirty_bitmap + kvm_dirty_bitmap_bytes(ms);
> 
> ?

I really don't like pointer arithmetic on void pointers, especially
when the base pointer we are working with is already of the correct
type. (Also, it's an extension; standard C doesn't even allow it.)

Do you really think it improves readability that much?
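For comparison, here is a minimal user-space sketch of the two expressions.
The slot_model struct and the dirty_bitmap_bytes() helper are hypothetical
stand-ins for kvm_memory_slot and kvm_dirty_bitmap_bytes(), not the kernel
definitions, and the comment about the CMMA bits living in the second half
of the dirty_bitmap allocation is an assumption based on the helper's name.
Both forms end up at the same address, one with element-based arithmetic on
the typed pointer and one with byte-based arithmetic through (void *):

/* Stand-alone sketch of the two expressions being discussed.  The struct
 * and helper below are simplified stand-ins, not the kernel definitions. */
#include <assert.h>
#include <stdlib.h>

struct slot_model {
	unsigned long npages;
	unsigned long *dirty_bitmap;	/* assumed 2x size: dirty bits + CMMA bits */
};

static unsigned long dirty_bitmap_bytes(struct slot_model *ms)
{
	/* one bit per page, rounded up to whole unsigned longs */
	unsigned long bits_per_long = 8 * sizeof(unsigned long);
	unsigned long longs = (ms->npages + bits_per_long - 1) / bits_per_long;

	return longs * sizeof(unsigned long);
}

int main(void)
{
	struct slot_model ms = { .npages = 1000 };

	ms.dirty_bitmap = calloc(2, dirty_bitmap_bytes(&ms));

	/* patch version: element-based arithmetic on the typed pointer */
	unsigned long *a = ms.dirty_bitmap +
			   dirty_bitmap_bytes(&ms) / sizeof(*ms.dirty_bitmap);

	/* suggested version: byte-based arithmetic via (void *), a GCC extension */
	unsigned long *b = (void *)ms.dirty_bitmap + dirty_bitmap_bytes(&ms);

	assert(a == b);		/* both point just past the dirty bitmap */
	free(ms.dirty_bitmap);
	return 0;
}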

> > +	return ms->dirty_bitmap + len;
> > +}  
> 
> 
> > +
> >  /*
> >   * Must be called with kvm->srcu held to avoid races on memslots, and with
> >   * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
> > @@ -1512,6 +1520,38 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
> >  #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
> >  /*
> > + * Similar to gfn_to_memslot, but returns a memslot also when the address falls
> > + * in a hole. In that case a memslot near the hole is returned.
> > + */
> > +static int gfn_to_memslot_approx(struct kvm *kvm, gfn_t gfn)
> > +{
> > +	struct kvm_memslots *slots = kvm_memslots(kvm);
> > +	int start = 0, end = slots->used_slots;
> > +	int slot = atomic_read(&slots->lru_slot);
> > +	struct kvm_memory_slot *memslots = slots->memslots;
> > +
> > +	if (gfn >= memslots[slot].base_gfn &&
> > +	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
> > +		return slot;
> > +
> > +	while (start < end) {
> > +		slot = start + (end - start) / 2;
> > +
> > +		if (gfn >= memslots[slot].base_gfn)
> > +			end = slot;
> > +		else
> > +			start = slot + 1;
> > +	}
> > +
> > +	if (gfn >= memslots[start].base_gfn &&
> > +	    gfn < memslots[start].base_gfn + memslots[start].npages) {
> > +		atomic_set(&slots->lru_slot, start);
> > +	}
> > +
> > +	return start;
> > +}  
> 
> This looks ugly, hope we can avoid this ....

This is actually a copy-paste of search_memslots (which is called by
gfn_to_memslot). We need this because the existing functions return
NULL when a slot is not found, but we need to return some memslot even
when the requested address falls in a hole.
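To illustrate the behaviour, here is a stand-alone sketch of that search;
memslot_approx and struct slot are simplified, hypothetical stand-ins for
the kernel types, and, as in struct kvm_memslots, the array is sorted by
base_gfn in descending order. When the gfn is not covered by any slot, the
returned index is simply that of the nearest slot below the hole:

/* Stand-alone sketch of the approximate lookup; not the kernel code. */
#include <stdio.h>

struct slot { unsigned long base_gfn, npages; };

static int memslot_approx(struct slot *slots, int used, unsigned long gfn)
{
	int start = 0, end = used, mid;

	while (start < end) {
		mid = start + (end - start) / 2;
		if (gfn >= slots[mid].base_gfn)
			end = mid;	/* candidate; a lower index (higher base) may still fit */
		else
			start = mid + 1;
	}
	/* exact hit, or the nearest slot when gfn falls in a hole */
	return start;
}

int main(void)
{
	/* descending base_gfn: [0x3000,0x3100), [0x1000,0x1100), [0x0,0x100) */
	struct slot slots[] = {
		{ 0x3000, 0x100 },
		{ 0x1000, 0x100 },
		{ 0x0000, 0x100 },
	};

	printf("%d\n", memslot_approx(slots, 3, 0x1050));	/* 1: inside slot 1 */
	printf("%d\n", memslot_approx(slots, 3, 0x2000));	/* 1: in a hole, nearest slot below */
	return 0;
}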

> > +
> > +/*
> >   * This function searches for the next page with dirty CMMA attributes, and
> >   * saves the attributes in the buffer up to either the end of the buffer or
> >   * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
> 
>
Christian Borntraeger Jan. 17, 2018, 12:16 p.m. UTC | #3
On 01/16/2018 07:03 PM, David Hildenbrand wrote:
> On 15.01.2018 18:03, Claudio Imbrenda wrote:
>> These are some utility functions that will be used later on for storage
>> attributes migration.
>>
>> Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
>> ---
>>  arch/s390/kvm/kvm-s390.c | 40 ++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 40 insertions(+)
>>
>> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
>> index 6f17031..100ea15 100644
>> --- a/arch/s390/kvm/kvm-s390.c
>> +++ b/arch/s390/kvm/kvm-s390.c
>> @@ -764,6 +764,14 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
>>  		kvm_s390_sync_request(req, vcpu);
>>  }
>>  
>> +static inline unsigned long *_cmma_bitmap(struct kvm_memory_slot *ms)
> 
> I think you can get rid of the "_" here. And usually we use two _ ?
> 
>> +{
>> +	unsigned long long len;
>> +
>> +	len = kvm_dirty_bitmap_bytes(ms) / sizeof(*ms->dirty_bitmap);
> 
> return (void *) ms->dirty_bitmap + kvm_dirty_bitmap_bytes(ms);
> 
> ?

Relying on pointer arithmetic treating the (void *) step as 1 is a gcc extension.
If possible I would like to avoid that.

> 
>> +	return ms->dirty_bitmap + len;
>> +}
> 
> 
>> +
>>  /*
>>   * Must be called with kvm->srcu held to avoid races on memslots, and with
>>   * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
>> @@ -1512,6 +1520,38 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
>>  #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
>>  
>>  /*
>> + * Similar to gfn_to_memslot, but returns a memslot also when the address falls
>> + * in a hole. In that case a memslot near the hole is returned.
>> + */
>> +static int gfn_to_memslot_approx(struct kvm *kvm, gfn_t gfn)
>> +{
>> +	struct kvm_memslots *slots = kvm_memslots(kvm);
>> +	int start = 0, end = slots->used_slots;
>> +	int slot = atomic_read(&slots->lru_slot);
>> +	struct kvm_memory_slot *memslots = slots->memslots;
>> +
>> +	if (gfn >= memslots[slot].base_gfn &&
>> +	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
>> +		return slot;
>> +
>> +	while (start < end) {
>> +		slot = start + (end - start) / 2;
>> +
>> +		if (gfn >= memslots[slot].base_gfn)
>> +			end = slot;
>> +		else
>> +			start = slot + 1;
>> +	}
>> +
>> +	if (gfn >= memslots[start].base_gfn &&
>> +	    gfn < memslots[start].base_gfn + memslots[start].npages) {
>> +		atomic_set(&slots->lru_slot, start);
>> +	}
>> +
>> +	return start;
>> +}
> 
> This looks ugly, hope we can avoid this ....
> 
>> +
>> +/*
>>   * This function searches for the next page with dirty CMMA attributes, and
>>   * saves the attributes in the buffer up to either the end of the buffer or
>>   * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
>>
> 
>

Patch

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6f17031..100ea15 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -764,6 +764,14 @@  static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
 		kvm_s390_sync_request(req, vcpu);
 }
 
+static inline unsigned long *_cmma_bitmap(struct kvm_memory_slot *ms)
+{
+	unsigned long long len;
+
+	len = kvm_dirty_bitmap_bytes(ms) / sizeof(*ms->dirty_bitmap);
+	return ms->dirty_bitmap + len;
+}
+
 /*
  * Must be called with kvm->srcu held to avoid races on memslots, and with
  * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
@@ -1512,6 +1520,38 @@  static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
 
 /*
+ * Similar to gfn_to_memslot, but returns a memslot also when the address falls
+ * in a hole. In that case a memslot near the hole is returned.
+ */
+static int gfn_to_memslot_approx(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int start = 0, end = slots->used_slots;
+	int slot = atomic_read(&slots->lru_slot);
+	struct kvm_memory_slot *memslots = slots->memslots;
+
+	if (gfn >= memslots[slot].base_gfn &&
+	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
+		return slot;
+
+	while (start < end) {
+		slot = start + (end - start) / 2;
+
+		if (gfn >= memslots[slot].base_gfn)
+			end = slot;
+		else
+			start = slot + 1;
+	}
+
+	if (gfn >= memslots[start].base_gfn &&
+	    gfn < memslots[start].base_gfn + memslots[start].npages) {
+		atomic_set(&slots->lru_slot, start);
+	}
+
+	return start;
+}
+
+/*
  * This function searches for the next page with dirty CMMA attributes, and
  * saves the attributes in the buffer up to either the end of the buffer or
  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;