@@ -155,6 +155,8 @@ static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		       unsigned long order);
+int topup_hyp_memcache_gfp(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+			   unsigned long order, gfp_t gfp);

static inline void init_hyp_memcache(struct kvm_hyp_memcache *mc)
{
@@ -1628,6 +1630,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
#define HYP_ALLOC_MGT_IOMMU_ID 1

unsigned long __pkvm_reclaim_hyp_alloc_mgt(unsigned long nr_pages);
+int __pkvm_topup_hyp_alloc_mgt_gfp(unsigned long id, unsigned long nr_pages,
+				   unsigned long sz_alloc, gfp_t gfp);

struct kvm_iommu_driver {
int (*init_driver)(void);
@@ -1225,6 +1225,11 @@ static void *hyp_mc_alloc_fn(void *flags, unsigned long order)
return addr;
}

+static void *hyp_mc_alloc_gfp_fn(void *flags, unsigned long order)
+{
+ return (void *)__get_free_pages(*(gfp_t *)flags, order);
+}
+
void free_hyp_memcache(struct kvm_hyp_memcache *mc)
{
unsigned long flags = mc->flags;
@@ -1249,6 +1254,21 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
kvm_host_pa, (void *)flags, order);
}

+int topup_hyp_memcache_gfp(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+ unsigned long order, gfp_t gfp)
+{
+ void *flags = &gfp;
+
+ if (!is_protected_kvm_enabled())
+ return 0;
+
+ if (order > PAGE_SHIFT)
+ return -E2BIG;
+
+ return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_gfp_fn,
+ kvm_host_pa, flags, order);
+}
+
/**
* kvm_phys_addr_ioremap - map a device range to guest IPA
*
@@ -1114,3 +1114,23 @@ unsigned long __pkvm_reclaim_hyp_alloc_mgt(unsigned long nr_pages)

return reclaimed;
}
+
+int __pkvm_topup_hyp_alloc_mgt_gfp(unsigned long id, unsigned long nr_pages,
+ unsigned long sz_alloc, gfp_t gfp)
+{
+ struct kvm_hyp_memcache mc;
+ int ret;
+
+ init_hyp_memcache(&mc);
+
+ ret = topup_hyp_memcache_gfp(&mc, nr_pages, get_order(sz_alloc), gfp);
+ if (ret)
+ return ret;
+
+ ret = kvm_call_hyp_nvhe(__pkvm_hyp_alloc_mgt_refill, id,
+ mc.head, mc.nr_pages);
+ if (ret)
+ free_hyp_memcache(&mc);
+
+ return ret;
+}
Soon, the IOMMU driver might need to top up the IOMMU pool from the
map_pages IOMMU operation, which takes a gfp flag as it might be called
from atomic context. Add a function to top up an allocator with an ID
that also accepts gfp flags.

Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  4 ++++
 arch/arm64/kvm/mmu.c              | 20 ++++++++++++++++++++
 arch/arm64/kvm/pkvm.c             | 20 ++++++++++++++++++++
 3 files changed, 44 insertions(+)
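For illustration only (not part of the patch): a minimal sketch of how an
IOMMU map_pages path could use the new helper from atomic context. Only
__pkvm_topup_hyp_alloc_mgt_gfp() and HYP_ALLOC_MGT_IOMMU_ID come from this
series; the constants and the function around them are hypothetical.

/*
 * Hypothetical caller, not from this patch: a map_pages-style path
 * refilling the hyp IOMMU allocator with the caller's gfp flags
 * (e.g. GFP_ATOMIC). IOMMU_TOPUP_PAGES, IOMMU_ALLOC_SZ and
 * my_iommu_map_pages() are made up for the example.
 */
#include <linux/gfp.h>
#include <asm/kvm_host.h>

#define IOMMU_TOPUP_PAGES	8		/* hypothetical refill amount */
#define IOMMU_ALLOC_SZ		PAGE_SIZE	/* hypothetical allocation unit */

static int my_iommu_map_pages(unsigned long iova, phys_addr_t paddr,
			      size_t pgsize, size_t pgcount, int prot,
			      gfp_t gfp, size_t *mapped)
{
	int ret;

	/* May run in atomic context, so pass the caller's gfp through. */
	ret = __pkvm_topup_hyp_alloc_mgt_gfp(HYP_ALLOC_MGT_IOMMU_ID,
					     IOMMU_TOPUP_PAGES,
					     IOMMU_ALLOC_SZ, gfp);
	if (ret)
		return ret;

	/* ... then issue the actual map call into the hypervisor ... */
	return 0;
}

Note that when the refill hypercall fails, __pkvm_topup_hyp_alloc_mgt_gfp()
frees the memcache itself, so a caller only needs to check the return value.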