diff mbox

[4/7] kvm mmu: implement necessary data structures for second huge page accounting

Message ID 49CF79A2.9020202@redhat.com (mailing list archive)
State Accepted
Headers show

Commit Message

Avi Kivity March 29, 2009, 1:37 p.m. UTC
Avi Kivity wrote:
> Joerg Roedel wrote:
>> This patch adds the necessary data structures to take care of write
>> protections in place within a second huge page sized page.
>>
>>
>> +#ifdef KVM_PAGES_PER_LHPAGE
>> +    if (npages && !new.hpage_info) {
>> +        int hugepages = npages / KVM_PAGES_PER_LHPAGE;
>> +        if (npages % KVM_PAGES_PER_LHPAGE)
>> +            hugepages++;
>> +        if (base_gfn % KVM_PAGES_PER_LHPAGE)
>> +            hugepages++;
>>   
>
> Consider a slot with base_gfn == 1 and npages == 1.  This will have 
> hugepages == 2, which is wrong.
>
> I think the right calculation is
>
>  ((base_gfn + npages - 1) / N) - (base_gfn / N) + 1
>
> i.e. index of last page, plus one so we can store it.
>
> The small huge page calculation is off as well.
>

I fixed the existing case with

commit 1a967084dbe97a2f4be84139d14e2d958d7ffc46
Author: Avi Kivity <avi@redhat.com>
Date:   Sun Mar 29 16:31:25 2009 +0300

    KVM: MMU: Fix off-by-one calculating large page count
   
    The large page initialization code concludes there are two large pages
    spanned by a slot covering 1 (small) page starting at gfn 1.  This is
    incorrect, and also results in incorrect write_count initialization in
    some cases (base = 1, npages = 513 for example).
   
    Cc: stable@kernel.org
    Signed-off-by: Avi Kivity <avi@redhat.com>

sizeof(*new.lpage_info));
diff mbox

Patch

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8aa3b95..3d31557 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1076,6 +1076,7 @@  int __kvm_set_memory_region(struct kvm *kvm,
        int r;
        gfn_t base_gfn;
        unsigned long npages;
+       int largepages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;
@@ -1151,11 +1152,8 @@  int __kvm_set_memory_region(struct kvm *kvm,
                        new.userspace_addr = 0;
        }
        if (npages && !new.lpage_info) {
-               int largepages = npages / KVM_PAGES_PER_HPAGE;
-               if (npages % KVM_PAGES_PER_HPAGE)
-                       largepages++;
-               if (base_gfn % KVM_PAGES_PER_HPAGE)
-                       largepages++;
+               largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
+               largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
 
                new.lpage_info = vmalloc(largepages *