Message ID | 20230523151217.46427-2-joao.m.martins@oracle.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | hw/vfio: Improve vfio_get_dirty_bitmap() tracepoint | expand |
On 5/23/23 17:12, Joao Martins wrote: > In preparation to including the number of dirty pages in the > vfio_get_dirty_bitmap() tracepoint, return the number of dirty pages in the > range passed by @dirty argument in cpu_physical_memory_set_dirty_lebitmap(). > > For now just set the callers to NULL, no functional change intended. Why not return the number of dirty pages to be consistent with cpu_physical_memory_sync_dirty_bitmap () ? bitmap_count_one() would also give the same result but at the cost of extra loops. Thanks, C. > > Signed-off-by: Joao Martins <joao.m.martins@oracle.com> > --- > accel/kvm/kvm-all.c | 2 +- > hw/vfio/common.c | 4 ++-- > include/exec/ram_addr.h | 10 +++++++++- > 3 files changed, 12 insertions(+), 4 deletions(-) > > diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c > index cf3a88d90e92..1524a34f1786 100644 > --- a/accel/kvm/kvm-all.c > +++ b/accel/kvm/kvm-all.c > @@ -559,7 +559,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot) > ram_addr_t start = slot->ram_start_offset; > ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); > > - cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); > + cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages, NULL); > } > > static void kvm_slot_reset_dirty_pages(KVMSlot *slot) > diff --git a/hw/vfio/common.c b/hw/vfio/common.c > index 78358ede2764..dcbf7c574d85 100644 > --- a/hw/vfio/common.c > +++ b/hw/vfio/common.c > @@ -587,7 +587,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, > ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); > if (!ret) { > cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, > - iotlb->translated_addr, vbmap.pages); > + iotlb->translated_addr, vbmap.pages, NULL); > } else { > error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); > } > @@ -1773,7 +1773,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, > } > > cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, 
> - vbmap.pages); > + vbmap.pages, NULL); > > trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size, > ram_addr); > diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h > index f4fb6a211175..07bf9e1502b6 100644 > --- a/include/exec/ram_addr.h > +++ b/include/exec/ram_addr.h > @@ -335,7 +335,8 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, > #if !defined(_WIN32) > static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, > ram_addr_t start, > - ram_addr_t pages) > + ram_addr_t pages, > + unsigned long *dirty) > { > unsigned long i, j; > unsigned long page_number, c; > @@ -380,6 +381,10 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, > } > } > > + if (dirty) { > + *dirty += ctpopl(temp); > + } > + > if (tcg_enabled()) { > qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], > temp); > @@ -411,6 +416,9 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, > if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { > total_dirty_pages += ctpopl(c); > } > + if (dirty) { > + *dirty += ctpopl(c); > + } > do { > j = ctzl(c); > c &= ~(1ul << j);
On 23/05/2023 17:07, Cédric Le Goater wrote: > On 5/23/23 17:12, Joao Martins wrote: >> In preparation to including the number of dirty pages in the >> vfio_get_dirty_bitmap() tracepoint, return the number of dirty pages in the >> range passed by @dirty argument in cpu_physical_memory_set_dirty_lebitmap(). >> >> For now just set the callers to NULL, no functional change intended. > > Why not return the number of dirty pages to be consistent with > cpu_physical_memory_sync_dirty_bitmap () ? > Great idea, let me switch to that. Didn't realize sync variant was using the return value as dirty pages. That also avoids some churn in the current callers. > bitmap_count_one() would also give the same result but at the cost > of extra loops. > > Thanks, > > C. > >> >> Signed-off-by: Joao Martins <joao.m.martins@oracle.com> >> --- >> accel/kvm/kvm-all.c | 2 +- >> hw/vfio/common.c | 4 ++-- >> include/exec/ram_addr.h | 10 +++++++++- >> 3 files changed, 12 insertions(+), 4 deletions(-) >> >> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c >> index cf3a88d90e92..1524a34f1786 100644 >> --- a/accel/kvm/kvm-all.c >> +++ b/accel/kvm/kvm-all.c >> @@ -559,7 +559,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot) >> ram_addr_t start = slot->ram_start_offset; >> ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); >> - cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); >> + cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages, >> NULL); >> } >> static void kvm_slot_reset_dirty_pages(KVMSlot *slot) >> diff --git a/hw/vfio/common.c b/hw/vfio/common.c >> index 78358ede2764..dcbf7c574d85 100644 >> --- a/hw/vfio/common.c >> +++ b/hw/vfio/common.c >> @@ -587,7 +587,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, >> ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); >> if (!ret) { >> cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, >> - iotlb->translated_addr, vbmap.pages); >> + iotlb->translated_addr, 
vbmap.pages, NULL); >> } else { >> error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); >> } >> @@ -1773,7 +1773,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer >> *container, uint64_t iova, >> } >> cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, >> - vbmap.pages); >> + vbmap.pages, NULL); >> trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size, >> ram_addr); >> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h >> index f4fb6a211175..07bf9e1502b6 100644 >> --- a/include/exec/ram_addr.h >> +++ b/include/exec/ram_addr.h >> @@ -335,7 +335,8 @@ static inline void >> cpu_physical_memory_set_dirty_range(ram_addr_t start, >> #if !defined(_WIN32) >> static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long >> *bitmap, >> ram_addr_t start, >> - ram_addr_t pages) >> + ram_addr_t pages, >> + unsigned long *dirty) >> { >> unsigned long i, j; >> unsigned long page_number, c; >> @@ -380,6 +381,10 @@ static inline void >> cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, >> } >> } >> + if (dirty) { >> + *dirty += ctpopl(temp); >> + } >> + >> if (tcg_enabled()) { >> qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], >> temp); >> @@ -411,6 +416,9 @@ static inline void >> cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, >> if (unlikely(global_dirty_tracking & >> GLOBAL_DIRTY_DIRTY_RATE)) { >> total_dirty_pages += ctpopl(c); >> } >> + if (dirty) { >> + *dirty += ctpopl(c); >> + } >> do { >> j = ctzl(c); >> c &= ~(1ul << j); >
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index cf3a88d90e92..1524a34f1786 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -559,7 +559,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot) ram_addr_t start = slot->ram_start_offset; ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); - cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); + cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages, NULL); } static void kvm_slot_reset_dirty_pages(KVMSlot *slot) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 78358ede2764..dcbf7c574d85 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -587,7 +587,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); if (!ret) { cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, - iotlb->translated_addr, vbmap.pages); + iotlb->translated_addr, vbmap.pages, NULL); } else { error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); } @@ -1773,7 +1773,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, } cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, - vbmap.pages); + vbmap.pages, NULL); trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size, ram_addr); diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index f4fb6a211175..07bf9e1502b6 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -335,7 +335,8 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, #if !defined(_WIN32) static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, ram_addr_t start, - ram_addr_t pages) + ram_addr_t pages, + unsigned long *dirty) { unsigned long i, j; unsigned long page_number, c; @@ -380,6 +381,10 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, } } + if (dirty) { + *dirty += ctpopl(temp); + } + if (tcg_enabled()) { 
qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp); @@ -411,6 +416,9 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { total_dirty_pages += ctpopl(c); } + if (dirty) { + *dirty += ctpopl(c); + } do { j = ctzl(c); c &= ~(1ul << j);
In preparation for including the number of dirty pages in the vfio_get_dirty_bitmap() tracepoint, return the number of dirty pages in the range via the @dirty argument of cpu_physical_memory_set_dirty_lebitmap(). For now just set the callers to NULL, no functional change intended. Signed-off-by: Joao Martins <joao.m.martins@oracle.com> --- accel/kvm/kvm-all.c | 2 +- hw/vfio/common.c | 4 ++-- include/exec/ram_addr.h | 10 +++++++++- 3 files changed, 12 insertions(+), 4 deletions(-)