@@ -290,15 +290,16 @@ static void __set_spte(u64 *sptep, u64 spte)
 #endif
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  struct kmem_cache *base_cache, int min)
+static int __mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				    struct kmem_cache *base_cache, int min,
+				    int max, gfp_t flags)
 {
 	void *obj;
 
 	if (cache->nobjs >= min)
 		return 0;
-	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
+	while (cache->nobjs < max) {
+		obj = kmem_cache_zalloc(base_cache, flags);
 		if (!obj)
 			return -ENOMEM;
 		cache->objects[cache->nobjs++] = obj;
@@ -306,6 +307,26 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 	return 0;
 }
 
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				  struct kmem_cache *base_cache, int min)
+{
+	return __mmu_topup_memory_cache(cache, base_cache, min,
+					ARRAY_SIZE(cache->objects), GFP_KERNEL);
+}
+
+static int mmu_topup_memory_cache_atomic(struct kvm_mmu_memory_cache *cache,
+					 struct kmem_cache *base_cache, int min)
+{
+	return __mmu_topup_memory_cache(cache, base_cache, min, min,
+					GFP_ATOMIC);
+}
+
+static int pte_prefetch_topup_memory_cache(struct kvm_vcpu *vcpu)
+{
+	return mmu_topup_memory_cache_atomic(&vcpu->arch.mmu_rmap_desc_cache,
+					     rmap_desc_cache, 1);
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
 				  struct kmem_cache *cache)
 {
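
For context, a minimal sketch of the intended caller, under stated assumptions: the pte prefetch path runs with mmu_lock (a spinlock) held and therefore must not sleep, which is why the cache is topped up with GFP_ATOMIC rather than GFP_KERNEL. The caller name and body below are illustrative assumptions, not part of this patch:

/*
 * Illustrative sketch only: a hypothetical prefetch path that tops up
 * the rmap_desc cache atomically because mmu_lock is already held.
 */
static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	/* mmu_lock is held: a sleeping GFP_KERNEL allocation is not allowed. */
	if (pte_prefetch_topup_memory_cache(vcpu))
		return;	/* -ENOMEM: skip the optional prefetch */

	/* ... walk the neighbouring sptes and install their mappings ... */
}

Note that the atomic variant caps the topup at min rather than ARRAY_SIZE(cache->objects), so the GFP_ATOMIC path draws no more from the emergency pools than the prefetch actually needs.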