@@ -219,14 +219,30 @@ static void guest_s2_put_page(void *addr)

static void clean_dcache_guest_page(void *va, size_t size)
{
- __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
- hyp_fixmap_unmap();
+ if (WARN_ON(!PAGE_ALIGNED(size)))
+ return;
+
+ while (size) {
+ __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)),
+ PAGE_SIZE);
+ hyp_fixmap_unmap();
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
- __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
- hyp_fixmap_unmap();
+ if (WARN_ON(!PAGE_ALIGNED(size)))
+ return;
+
+ while (size) {
+ __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)),
+ PAGE_SIZE);
+ hyp_fixmap_unmap();
+ va += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
}

int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
clean_dcache_guest_page() and invalidate_icache_guest_page() accept a size as an argument. But they also rely on the fixmap, which can only map a single PAGE_SIZE page.

With the upcoming stage-2 huge mappings for pKVM np-guests, those callbacks will get a size > PAGE_SIZE. Loop the CMOs on a PAGE_SIZE basis until the whole range is done.

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>