@@ -1783,6 +1783,7 @@ static int kvm_init(MachineState *ms)
}
kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
+ kvm_state->memcrypt_sync_page_enc_bitmap = sev_sync_page_enc_bitmap;
}
ret = kvm_arch_init(ms, s);
@@ -51,6 +51,8 @@ struct RAMBlock {
unsigned long *unsentmap;
/* bitmap of already received pages in postcopy */
unsigned long *receivedmap;
+ /* bitmap of page encryption state for an encrypted guest */
+ unsigned long *encbmap;
};
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
@@ -1680,6 +1680,9 @@ static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
rs->migration_dirty_pages +=
cpu_physical_memory_sync_dirty_bitmap(rb, 0, length,
&rs->num_dirty_pages_period);
+ if (kvm_memcrypt_enabled()) {
+ kvm_memcrypt_sync_page_enc_bitmap(rb->host, length, rb->encbmap);
+ }
}
/**
@@ -2465,6 +2468,22 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
return false;
}
+/**
+ * encrypted_test_bitmap: check if the page is encrypted
+ *
+ * Returns true if the page is encrypted; ROM regions are always unencrypted.
+ */
+static bool encrypted_test_bitmap(RAMState *rs, RAMBlock *block,
+ unsigned long page)
+{
+ /* ROM devices contain unencrypted data */
+ if (memory_region_is_rom(block->mr)) {
+ return false;
+ }
+
+ return test_bit(page, block->encbmap);
+}
+
/**
* ram_save_target_page: save one target page
*
@@ -2491,7 +2510,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
* will take care of accessing the guest memory and re-encrypt it
* for the transport purposes.
*/
- if (kvm_memcrypt_enabled()) {
+ if (kvm_memcrypt_enabled() &&
+ encrypted_test_bitmap(rs, pss->block, pss->page)) {
return ram_save_encrypted_page(rs, pss, last_stage);
}
@@ -2724,6 +2744,8 @@ static void ram_save_cleanup(void *opaque)
block->bmap = NULL;
g_free(block->unsentmap);
block->unsentmap = NULL;
+ g_free(block->encbmap);
+ block->encbmap = NULL;
}
xbzrle_cleanup();
@@ -3251,6 +3273,10 @@ static void ram_list_init_bitmaps(void)
block->unsentmap = bitmap_new(pages);
bitmap_set(block->unsentmap, 0, pages);
}
+ if (kvm_memcrypt_enabled()) {
+ block->encbmap = bitmap_new(pages);
+ bitmap_set(block->encbmap, 0, pages);
+ }
}
}
}
@@ -819,6 +819,33 @@ sev_encrypt_data(void *handle, uint8_t *ptr, uint64_t len)
return 0;
}
+int sev_sync_page_enc_bitmap(void *handle, uint8_t *host, uint64_t size,
+ unsigned long *bitmap)
+{
+ int r;
+ unsigned long base_gpa;
+ KVMState *s = kvm_state;
+ struct kvm_page_enc_bitmap e = {};
+ unsigned long pages = size >> TARGET_PAGE_BITS;
+
+ r = kvm_physical_memory_addr_from_host(kvm_state, host, &base_gpa);
+ if (!r) {
+ return 1;
+ }
+
+ e.enc_bitmap = bitmap;
+ e.start = base_gpa >> TARGET_PAGE_BITS;
+ e.num_pages = pages;
+
+ if (kvm_vm_ioctl(s, KVM_GET_PAGE_ENC_BITMAP, &e) == -1) {
+ error_report("%s: failed to get page_enc bitmap, start 0x%llx pages 0x%llx",
+ __func__, e.start, e.num_pages);
+ return 1;
+ }
+
+ return 0;
+}
+
static void
sev_register_types(void)
{
SEV VMs have a concept of private and shared memory. Private memory is encrypted with a guest-specific key, while shared memory may be encrypted with the hypervisor key. The KVM_GET_PAGE_ENC_BITMAP ioctl can be used to get a bitmap indicating whether a guest page is private or shared. A private page must be transmitted using the SEV migration commands.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 accel/kvm/kvm-all.c     |  1 +
 include/exec/ram_addr.h |  2 ++
 migration/ram.c         | 28 +++++++++++++++++++++++++++-
 target/i386/sev.c       | 27 +++++++++++++++++++++++++++
 4 files changed, 57 insertions(+), 1 deletion(-)
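
For reference, below is a minimal standalone sketch of how a userspace caller might drive the proposed KVM_GET_PAGE_ENC_BITMAP ioctl and test a single guest page, outside of QEMU's RAMBlock plumbing. The struct field names (start, num_pages, enc_bitmap) mirror sev_sync_page_enc_bitmap() above; gpa_is_encrypted(), GUEST_PAGE_SHIFT and vm_fd are illustrative assumptions, and the ioctl/struct definitions come from the companion (unmerged) KVM series rather than upstream <linux/kvm.h>.

/*
 * Sketch only: fetch the page encryption bitmap for a GPA range and
 * test one page.  Assumes struct kvm_page_enc_bitmap and
 * KVM_GET_PAGE_ENC_BITMAP from the companion KVM patches are visible
 * via <linux/kvm.h>; vm_fd is an already-created KVM VM descriptor.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define GUEST_PAGE_SHIFT 12   /* 4K guest pages, matching TARGET_PAGE_BITS */

static bool gpa_is_encrypted(int vm_fd, uint64_t range_gpa,
                             uint64_t range_size, uint64_t gpa)
{
    uint64_t pages = range_size >> GUEST_PAGE_SHIFT;
    size_t bits_per_long = 8 * sizeof(unsigned long);
    size_t nlongs = (pages + bits_per_long - 1) / bits_per_long;
    unsigned long *bitmap = calloc(nlongs, sizeof(unsigned long));
    struct kvm_page_enc_bitmap e = {
        .start = range_gpa >> GUEST_PAGE_SHIFT,  /* first gfn of the range */
        .num_pages = pages,
        .enc_bitmap = bitmap,                    /* filled in by KVM */
    };
    bool encrypted = true;                       /* treat unknown as private */

    if (bitmap && ioctl(vm_fd, KVM_GET_PAGE_ENC_BITMAP, &e) == 0) {
        uint64_t bit = (gpa - range_gpa) >> GUEST_PAGE_SHIFT;
        encrypted = !!(bitmap[bit / bits_per_long] &
                       (1UL << (bit % bits_per_long)));
    }
    free(bitmap);
    return encrypted;
}

In the patch itself the equivalent flow is: migration_bitmap_sync_range() refreshes each RAMBlock's encbmap alongside the dirty bitmap, and ram_save_target_page() consults it via encrypted_test_bitmap() to decide between the regular path and ram_save_encrypted_page().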