@@ -47,6 +47,31 @@ enum prefault_snp_test_type {
PREFAULT_PRIVATE_SHARED_AFTER_FINALIZING
};
+enum falloc_snp_test_type {
+ /* Skip alloc tests. */
+ NO_ALLOC_TYPE,
+ /*
+ * Allocate and/or deallocate a region of guest memfd before
+ * memory regions are updated to be protected and encrypted
+ *
+ * This should succeed since allocation and deallocation are
+ * supported before the memory is finalized.
+ */
+ ALLOC_BEFORE_UPDATE,
+ ALLOC_AFTER_UPDATE,
+ DEALLOC_BEFORE_UPDATE,
+ ALLOC_DEALLOC_BEFORE_UPDATE,
+ /*
+ * Allocate and/or deallocate a region of guest memfd after
+ * memory regions are updated to be protected and encrypted
+ *
+ * This should fail since dealloc will nuke the pages that
+ * contain the initial code that the guest will run.
+ */
+ DEALLOC_AFTER_UPDATE,
+ ALLOC_DEALLOC_AFTER_UPDATE
+};
+
static void guest_code_sev(void)
{
int i;
@@ -73,6 +98,29 @@ static void guest_code_sev(void)
GUEST_DONE();
}
+static void __falloc_region(struct kvm_vm *vm, bool punch_hole)
+{
+ int ctr, ret, flags = FALLOC_FL_KEEP_SIZE;
+ struct userspace_mem_region *region;
+
+ hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
+ if (punch_hole)
+ flags |= FALLOC_FL_PUNCH_HOLE;
+ ret = fallocate(region->region.guest_memfd, flags, 0, PAGE_SIZE * TEST_NPAGES);
+ TEST_ASSERT(!ret, "fallocate should succeed.");
+ }
+}
+
+static void gmemfd_alloc(struct kvm_vm *vm)
+{
+ __falloc_region(vm, false);
+}
+
+static void gmemfd_dealloc(struct kvm_vm *vm)
+{
+ __falloc_region(vm, true);
+}
+
static void __pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
u64 left, bool expect_fail)
{
@@ -137,13 +185,34 @@ static void pre_fault_memory_negative(struct kvm_vcpu *vcpu, u64 gpa,
}
static void pre_fault_memory_snp(struct kvm_vcpu *vcpu, struct kvm_vm *vm,
- bool private, enum prefault_snp_test_type p_type)
+ bool private, enum prefault_snp_test_type p_type,
+ enum falloc_snp_test_type f_type)
{
+ if (f_type == ALLOC_BEFORE_UPDATE ||
+ f_type == ALLOC_DEALLOC_BEFORE_UPDATE) {
+ gmemfd_alloc(vm);
+ }
+
+ if (f_type == DEALLOC_BEFORE_UPDATE ||
+ f_type == ALLOC_DEALLOC_BEFORE_UPDATE) {
+ gmemfd_dealloc(vm);
+ }
+
if (p_type == PREFAULT_SHARED_BEFORE_FINALIZING)
pre_fault_memory_negative(vcpu, TEST_GPA, SZ_2M, 0);
snp_vm_launch_start(vm, SNP_POLICY);
+ if (f_type == ALLOC_BEFORE_UPDATE ||
+ f_type == ALLOC_DEALLOC_BEFORE_UPDATE) {
+ gmemfd_alloc(vm);
+ }
+
+ if (f_type == DEALLOC_BEFORE_UPDATE ||
+ f_type == ALLOC_DEALLOC_BEFORE_UPDATE) {
+ gmemfd_dealloc(vm);
+ }
+
if (p_type == PREFAULT_SHARED_BEFORE_FINALIZING)
pre_fault_memory_negative(vcpu, TEST_GPA, SZ_2M, 0);
@@ -164,11 +233,36 @@ static void pre_fault_memory_snp(struct kvm_vcpu *vcpu, struct kvm_vm *vm,
snp_vm_launch_update(vm);
+ if (f_type == ALLOC_AFTER_UPDATE ||
+ f_type == ALLOC_DEALLOC_AFTER_UPDATE) {
+ gmemfd_alloc(vm);
+ }
+
+ /*
+ * Hole-punch after SNP LAUNCH UPDATE is not expected to fail
+ * immediately; rather, its effects are observed on vcpu_run()
+ * as the pages containing the initial guest code are nuked.
+ */
+ if (f_type == DEALLOC_AFTER_UPDATE ||
+ f_type == ALLOC_DEALLOC_AFTER_UPDATE) {
+ gmemfd_dealloc(vm);
+ }
+
if (p_type == PREFAULT_SHARED_BEFORE_FINALIZING)
pre_fault_memory_negative(vcpu, TEST_GPA, SZ_2M, 0);
snp_vm_launch_finish(vm);
+ if (f_type == ALLOC_AFTER_UPDATE ||
+ f_type == ALLOC_DEALLOC_AFTER_UPDATE) {
+ gmemfd_alloc(vm);
+ }
+
+ if (f_type == DEALLOC_AFTER_UPDATE ||
+ f_type == ALLOC_DEALLOC_AFTER_UPDATE) {
+ gmemfd_dealloc(vm);
+ }
+
/*
* After finalization, pre-faulting either private or shared
* ranges should work regardless of whether the pages were
@@ -210,7 +304,8 @@ static void pre_fault_memory_sev(unsigned long vm_type, struct kvm_vcpu *vcpu,
}
static void test_pre_fault_memory_sev(unsigned long vm_type, bool private,
- enum prefault_snp_test_type p_type)
+ enum prefault_snp_test_type p_type,
+ enum falloc_snp_test_type f_type)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -246,12 +341,22 @@ static void test_pre_fault_memory_sev(unsigned long vm_type, bool private,
}
if (vm_type == KVM_X86_SNP_VM)
- pre_fault_memory_snp(vcpu, vm, private, p_type);
+ pre_fault_memory_snp(vcpu, vm, private, p_type, f_type);
else
pre_fault_memory_sev(vm_type, vcpu, vm);
vcpu_run(vcpu);
+ /* Expect SHUTDOWN when we fallocate with FALLOC_FL_PUNCH_HOLE after SNP_LAUNCH_UPDATE */
+ if (vm->type == KVM_X86_SNP_VM &&
+ (f_type == DEALLOC_AFTER_UPDATE ||
+ f_type == ALLOC_DEALLOC_AFTER_UPDATE)) {
+ TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Wanted SHUTDOWN, got %s",
+ exit_reason_str(vcpu->run->exit_reason));
+ goto out;
+ }
+
if (vm->type == KVM_X86_SEV_ES_VM || vm->type == KVM_X86_SNP_VM) {
TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
"Wanted SYSTEM_EVENT, got %s",
@@ -278,7 +383,7 @@ static void test_pre_fault_memory_sev(unsigned long vm_type, bool private,
static void test_pre_fault_memory(unsigned long vm_type, bool private)
{
- int pt;
+ int pt, ft;
if (vm_type && !(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(vm_type))) {
pr_info("Skipping tests for vm_type 0x%lx\n", vm_type);
@@ -288,11 +393,13 @@ static void test_pre_fault_memory(unsigned long vm_type, bool private)
switch (vm_type) {
case KVM_X86_SEV_VM:
case KVM_X86_SEV_ES_VM:
- test_pre_fault_memory_sev(vm_type, private, NO_PREFAULT_TYPE);
+ test_pre_fault_memory_sev(vm_type, private, NO_PREFAULT_TYPE, NO_ALLOC_TYPE);
break;
case KVM_X86_SNP_VM:
- for (pt = 0; pt <= PREFAULT_PRIVATE_SHARED_AFTER_FINALIZING; pt++)
- test_pre_fault_memory_sev(vm_type, private, pt);
+ for (pt = 0; pt <= PREFAULT_PRIVATE_SHARED_AFTER_FINALIZING; pt++) {
+ for (ft = 0; ft <= ALLOC_DEALLOC_AFTER_UPDATE; ft++)
+ test_pre_fault_memory_sev(vm_type, private, pt, ft);
+ }
break;
default:
abort();