With parallel table walks there is no guarantee that KVM reads back the
same pte that was written. Spin off a helper that creates a pte value,
thereby allowing the visitor callback to return the next table without
reading the ptep again.

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/arm64/kvm/hyp/pgtable.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

@@ -167,14 +167,23 @@ static void kvm_clear_pte(kvm_pte_t *ptep)
 	WRITE_ONCE(*ptep, 0);
 }
 
-static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
-			      struct kvm_pgtable_mm_ops *mm_ops)
+static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
 {
-	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
+	kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
 
 	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
 	pte |= KVM_PTE_VALID;
 
+	return pte;
+}
+
+static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
+			      struct kvm_pgtable_mm_ops *mm_ops)
+{
+	kvm_pte_t pte, old = *ptep;
+
+	pte = kvm_init_table_pte(childp, mm_ops);
+
 	WARN_ON(kvm_pte_valid(old));
 	smp_store_release(ptep, pte);
 }
@@ -931,7 +940,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 			       kvm_pte_t *old, struct stage2_map_data *data, bool shared)
 {
 	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
-	kvm_pte_t *childp;
+	kvm_pte_t *childp, pte;
 	int ret;
 
 	if (data->anchor) {
@@ -969,9 +978,9 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * a table. Accesses beyond 'end' that fall within the new table
 	 * will be mapped lazily.
 	 */
-	kvm_set_table_pte(ptep, childp, mm_ops);
-	mm_ops->get_page(ptep);
-	*old = *ptep;
+	pte = kvm_init_table_pte(childp, mm_ops);
+	stage2_make_pte(ptep, pte, data->mm_ops);
+	*old = pte;
 	return 0;
 }
 
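Below is a minimal user-space sketch of the hazard the commit message describes and the pattern the patch adopts. Everything in it (pte_t, make_table_pte, install_table) is an illustrative stand-in for the kernel helpers, not kernel code: the leaf visitor computes the table pte as a plain value, publishes it with a release store, and hands that computed value back to the caller instead of re-reading *ptep, which a concurrent walker may have changed in the meantime.

/*
 * Self-contained sketch (C11); names are hypothetical stand-ins for
 * kvm_init_table_pte()/stage2_make_pte(), not the kernel's API.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;

#define PTE_VALID	(1ULL << 0)
#define PTE_TYPE_TABLE	(1ULL << 1)

/* Like kvm_init_table_pte(): build the pte value, touch no memory. */
static pte_t make_table_pte(uintptr_t child_pa)
{
	return child_pa | PTE_TYPE_TABLE | PTE_VALID;
}

/* Like the map-walker leaf step: install a table and report what we wrote. */
static int install_table(_Atomic pte_t *ptep, uintptr_t child_pa, pte_t *old)
{
	pte_t pte = make_table_pte(child_pa);

	/* Publish the entry; pairs with acquire loads in other walkers. */
	atomic_store_explicit(ptep, pte, memory_order_release);

	/*
	 * Return the value we computed, not *ptep: by the time the
	 * pointer is read back, a parallel walker could have replaced
	 * the entry, so *ptep is not guaranteed to be our pte.
	 */
	*old = pte;
	return 0;
}

int main(void)
{
	static _Atomic pte_t slot;
	pte_t seen;

	install_table(&slot, 0x40000000, &seen);
	printf("installed pte: 0x%llx\n", (unsigned long long)seen);
	return 0;
}

The design point mirrors the patch: once walkers race, the only value a visitor can trust is the one it created itself, so the pte-construction step is split out as a pure function and the written value is propagated by hand.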