@@ -5714,16 +5714,20 @@ static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	const struct kvm_memory_slot *memslot;
 	struct kvm_memslots *slots;
+	struct rb_node *node;
 	bool flush = false;
 	gfn_t start, end;
-	int i, bkt;
+	int i, idx;
 
 	if (!kvm_memslots_have_rmaps(kvm))
 		return flush;
 
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 		slots = __kvm_memslots(kvm, i);
+		idx = slots->node_idx;
+
+		kvm_for_each_memslot_in_gfn_range(node, slots, gfn_start, gfn_end) {
+			memslot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
 			start = max(gfn_start, memslot->base_gfn);
 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
 			if (start >= end)
@@ -5747,6 +5751,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	bool flush;
 	int i;
 
+	if (WARN_ON_ONCE(gfn_end <= gfn_start))
+		return;
+
 	write_lock(&kvm->mmu_lock);
 
 	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
@@ -832,6 +832,75 @@ struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 	return NULL;
 }
 
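+/*
+ * Returns the leftmost node in the gfn tree whose slot starts strictly above
+ * @gfn, or NULL if no slot does (including when the tree is empty).
+ */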
+static inline
+struct rb_node *kvm_memslots_gfn_upper_bound(struct kvm_memslots *slots, gfn_t gfn)
+{
+	int idx = slots->node_idx;
+	struct rb_node *node, *result = NULL;
+
+	for (node = slots->gfn_tree.rb_node; node; ) {
+		struct kvm_memory_slot *slot;
+
+		slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+		if (gfn < slot->base_gfn) {
+			result = node;
+			node = node->rb_left;
+		} else
+			node = node->rb_right;
+	}
+
+	return result;
+}
+
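+/*
+ * Returns the node at which a scan of slots possibly intersecting a range
+ * beginning at @start should begin: the last slot that starts at or below
+ * @start when one exists, otherwise the first slot in the tree; NULL only
+ * when there are no slots at all.
+ */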
+static inline
+struct rb_node *kvm_for_each_in_gfn_first(struct kvm_memslots *slots, gfn_t start)
+{
+	struct rb_node *node;
+
+	/*
+	 * Find the slot with the lowest gfn that can possibly intersect with
+	 * the range, so ideally its start gfn is at or below the range start.
+	 */
+	node = kvm_memslots_gfn_upper_bound(slots, start);
+	if (node) {
+		struct rb_node *pnode;
+
+		/*
+		 * A NULL previous node means that the very first slot
+		 * already has a higher start gfn than the range start;
+		 * in that case the scan starts from it.
+		 */
+		pnode = rb_prev(node);
+		if (pnode)
+			node = pnode;
+	} else {
+		/*
+		 * No slot starts above @start, so scan from the last slot;
+		 * a NULL node here means there are no slots at all.
+		 */
+		node = rb_last(&slots->gfn_tree);
+	}
+
+	return node;
+}
+
+static inline
+bool kvm_for_each_in_gfn_no_more(struct kvm_memslots *slots, struct rb_node *node, gfn_t end)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
+
+	/*
+	 * If this slot starts at or beyond the end of the range, so does
+	 * every slot after it.
+	 */
+	return memslot->base_gfn >= end;
+}
+
+/* Iterate over each memslot *possibly* intersecting [start, end) range */
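+/*
+ * @node points at the slot's gfn_node entry; callers resolve it with
+ * container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]).
+ * "Possibly" because the first slot visited may still end at or before
+ * @start, so callers must check for actual overlap themselves.
+ */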
+#define kvm_for_each_memslot_in_gfn_range(node, slots, start, end)	\
+	for (node = kvm_for_each_in_gfn_first(slots, start);		\
+	     node && !kvm_for_each_in_gfn_no_more(slots, node, end);	\
+	     node = rb_next(node))
+
 /*
  * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
  * - create a new memory slot