@@ -179,7 +179,7 @@ static struct fixed_mtrr_segment fixed_seg_table[] = {
{
.start = 0xc0000,
.end = 0x100000,
- .range_shift = 12, /* 12K */
+ .range_shift = 12, /* 4K */
.range_start = 24,
}
};
@@ -816,6 +816,67 @@ static void kvm_add_mtrr_zap_list(struct kvm *kvm, struct mtrr_zap_range *range)
spin_unlock(&kvm->arch.mtrr_zap_list_lock);
}
+/*
+ * The fixed MTRR ranges cover only 256 pages in total. Balancing the
+ * overhead of zapping multiple small ranges against the chance of
+ * merging duplicated ranges, add the fixed MTRR ranges to the zap list
+ * as a single whole whenever any one of them has a memory type other
+ * than the specified type.
+ */
+static int prepare_zaplist_fixed_mtrr_of_non_type(struct kvm_vcpu *vcpu, u8 type)
+{
+ struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
+ struct mtrr_zap_range *range;
+ int index, seg_end;
+ u8 mem_type;
+
+ for (index = 0; index < KVM_NR_FIXED_MTRR_REGION; index++) {
+ mem_type = mtrr_state->fixed_ranges[index];
+
+ if (mem_type == type)
+ continue;
+
+ range = kmalloc(sizeof(*range), GFP_KERNEL_ACCOUNT);
+ if (!range)
+ return -ENOMEM;
+
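+ /* The fixed ranges together span the low 1MB: GPA 0x0 - 0xfffff. */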
+ seg_end = ARRAY_SIZE(fixed_seg_table) - 1;
+ range->start = gpa_to_gfn(fixed_seg_table[0].start);
+ range->end = gpa_to_gfn(fixed_seg_table[seg_end].end);
+ kvm_add_mtrr_zap_list(vcpu->kvm, range);
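+ /* A single zap-list entry covers all fixed ranges; stop scanning. */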
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Add each variable MTRR range to the mtrr zap list
+ * if its memory type does not equal the specified type.
+ */
+static int prepare_zaplist_var_mtrr_of_non_type(struct kvm_vcpu *vcpu, u8 type)
+{
+ struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
+ struct mtrr_zap_range *range;
+ struct kvm_mtrr_range *tmp;
+ u8 mem_type;
+
+ list_for_each_entry(tmp, &mtrr_state->head, node) {
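+ /* The memory type lives in bits 7:0 of the variable MTRR base MSR. */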
+ mem_type = tmp->base & 0xff;
+ if (mem_type == type)
+ continue;
+
+ range = kmalloc(sizeof(*range), GFP_KERNEL_ACCOUNT);
+ if (!range)
+ return -ENOMEM;
+
+ var_mtrr_range(tmp, &range->start, &range->end);
+ range->start = gpa_to_gfn(range->start);
+ range->end = gpa_to_gfn(range->end);
+ kvm_add_mtrr_zap_list(vcpu->kvm, range);
+ }
+ return 0;
+}
+
static void kvm_zap_mtrr_zap_list(struct kvm *kvm)
{
struct list_head *head = &kvm->arch.mtrr_zap_list;
@@ -875,7 +936,50 @@ static void kvm_mtrr_zap_gfn_range(struct kvm_vcpu *vcpu,
kvm_zap_gfn_range(vcpu->kvm, gfn_start, gfn_end);
}
+/*
+ * Zap GFN ranges when CR0.CD toggles between 0 and 1.
+ * With noncoherent DMA present,
+ * when CR0.CD=1, the TDP memtype is WB, or UC with IPAT set;
+ * when CR0.CD=0, the TDP memtype is determined by the guest MTRRs.
+ * Therefore, if the cache-disabled memtype differs from the guest MTRR
+ * default memtype, everything is zapped;
+ * if the cache-disabled memtype equals the guest MTRR default memtype,
+ * only MTRR ranges of non-default memtype need to be zapped.
+ */
void kvm_zap_gfn_range_on_cd_toggle(struct kvm_vcpu *vcpu)
{
- return kvm_mtrr_zap_gfn_range(vcpu, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
+ struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
+ bool mtrr_enabled = mtrr_is_enabled(mtrr_state);
+ u8 default_type;
+ u8 cd_type;
+ bool ipat;
+
+ kvm_mtrr_get_cd_memory_type(vcpu, &cd_type, &ipat);
+
+ default_type = mtrr_enabled ? mtrr_default_type(mtrr_state) :
+ mtrr_disabled_type(vcpu);
+
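+ /* If IPAT is forced, the CD memtype overrides guest MTRRs entirely. */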
+ if (cd_type != default_type || ipat)
+ return kvm_mtrr_zap_gfn_range(vcpu, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
+
+ /*
+ * If MTRRs are disabled, the path above has already zapped everything
+ * when the default type differs from cd_type; when they are equal,
+ * there is nothing to zap.
+ */
+ if (mtrr_enabled) {
+ if (prepare_zaplist_fixed_mtrr_of_non_type(vcpu, default_type))
+ goto fail;
+
+ if (prepare_zaplist_var_mtrr_of_non_type(vcpu, default_type))
+ goto fail;
+
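+ /* Zap the collected ranges, or wait if another vCPU is already zapping. */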
+ kvm_zap_or_wait_mtrr_zap_list(vcpu->kvm);
+ }
+ return;
+fail:
+ kvm_clear_mtrr_zap_list(vcpu->kvm);
+ /* Resort to zapping everything on failure. */
+ kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}