@@ -228,6 +228,8 @@ int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUState *cpu);
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
+unsigned long *tighten_guest_free_page_bmap(unsigned long *bmap);
+unsigned long get_guest_max_pfn(void);
#ifndef _WIN32
int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset);
#endif
@@ -626,3 +626,17 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
{
return (data - 32) & 0xffff;
}
+
+unsigned long get_guest_max_pfn(void)
+{
+ /* To be done */
+
+ return 0;
+}
+
+unsigned long *tighten_guest_free_page_bmap(unsigned long *bmap)
+{
+ /* To be done */
+
+ return bmap;
+}
@@ -3334,3 +3334,38 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
{
abort();
}
+
+unsigned long get_guest_max_pfn(void)
+{
+ PCMachineState *pcms = PC_MACHINE(current_machine);
+ ram_addr_t above_4g_mem = pcms->above_4g_mem_size;
+ unsigned long max_pfn;
+
+ if (above_4g_mem) {
+ max_pfn = ((1ULL << 32) + above_4g_mem) >> TARGET_PAGE_BITS;
+ } else {
+ max_pfn = pcms->below_4g_mem_size >> TARGET_PAGE_BITS;
+ }
+
+ return max_pfn;
+}
+
+unsigned long *tighten_guest_free_page_bmap(unsigned long *bmap)
+{
+ PCMachineState *pcms = PC_MACHINE(current_machine);
+ ram_addr_t above_4g_mem = pcms->above_4g_mem_size;
+
+ if (above_4g_mem) {
+ unsigned long *src, *dst, len, pos;
+ ram_addr_t below_4g_mem = pcms->below_4g_mem_size;
+ src = bmap + ((1ULL << 32) >> TARGET_PAGE_BITS) / BITS_PER_LONG;
+ dst = bmap + (below_4g_mem >> TARGET_PAGE_BITS) / BITS_PER_LONG;
+ bitmap_move(dst, src, above_4g_mem >> TARGET_PAGE_BITS);
+
+ pos = (above_4g_mem + below_4g_mem) >> TARGET_PAGE_BITS;
+ len = ((1ULL << 32) - below_4g_mem) >> TARGET_PAGE_BITS;
+ bitmap_clear(bmap, pos, len);
+ }
+
+ return bmap;
+}
@@ -1048,3 +1048,17 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
{
abort();
}
+
+unsigned long get_guest_max_pfn(void)
+{
+ /* To be done */
+
+ return 0;
+}
+
+unsigned long *tighten_guest_free_page_bmap(unsigned long *bmap)
+{
+ /* To be done */
+
+ return bmap;
+}
@@ -2579,3 +2579,17 @@ int kvmppc_enable_hwrng(void)
{
    return kvmppc_enable_hcall(kvm_state, H_RANDOM);
}
+
+unsigned long get_guest_max_pfn(void)
+{
+ /* To be done */
+
+ return 0;
+}
+
+unsigned long *tighten_guest_free_page_bmap(unsigned long *bmap)
+{
+ /* To be done */
+
+ return bmap;
+}
@@ -2250,3 +2250,17 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
{
abort();
}
+
+unsigned long get_guest_max_pfn(void)
+{
+ /* To be done */
+
+ return 0;
+}
+
+unsigned long *tighten_guest_free_page_bmap(unsigned long *bmap)
+{
+ /* To be done */
+
+ return bmap;
+}
Add a new function to get the VM's max PFN and a new function to
filter out the holes to get a tight free page bitmap. They are
implemented on x86, and all the arches should implement them for
live migration optimization.

Signed-off-by: Liang Li <liang.z.li@intel.com>
---
 include/sysemu/kvm.h |  2 ++
 target-arm/kvm.c     | 14 ++++++++++++++
 target-i386/kvm.c    | 35 +++++++++++++++++++++++++++++++++++
 target-mips/kvm.c    | 14 ++++++++++++++
 target-ppc/kvm.c     | 14 ++++++++++++++
 target-s390x/kvm.c   | 14 ++++++++++++++
 6 files changed, 93 insertions(+)