Message ID | 20210809152448.1810400-6-qperret@google.com (mailing list archive)
---|---
State | New, archived
Series | Track shared pages at EL2 in protected mode

Hi Quentin,

On Mon, Aug 9, 2021 at 5:25 PM Quentin Perret <qperret@google.com> wrote:
>
> The KVM pgtable API exposes the kvm_pgtable_walk() function to allow
> the definition of walkers outside of pgtable.c. However, it is not easy
> to implement any of those walkers without some of the low-level helpers.
> Move some of them to the header file to allow re-use from other places.
>
> Signed-off-by: Quentin Perret <qperret@google.com>

Reviewed-by: Fuad Tabba <tabba@google.com>

Thanks,
/fuad

> ---
>  arch/arm64/include/asm/kvm_pgtable.h | 40 ++++++++++++++++++++++++++++
>  arch/arm64/kvm/hyp/pgtable.c         | 39 ---------------------------
>  2 files changed, 40 insertions(+), 39 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
> index 082b9d65f40b..6938eac72c1f 100644
> --- a/arch/arm64/include/asm/kvm_pgtable.h
> +++ b/arch/arm64/include/asm/kvm_pgtable.h
> @@ -25,6 +25,46 @@ static inline u64 kvm_get_parange(u64 mmfr0)
>
>  typedef u64 kvm_pte_t;
>
> +#define KVM_PTE_VALID BIT(0)
> +
> +#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
> +#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
> +
> +static inline bool kvm_pte_valid(kvm_pte_t pte)
> +{
> +	return pte & KVM_PTE_VALID;
> +}
> +
> +static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
> +{
> +	u64 pa = pte & KVM_PTE_ADDR_MASK;
> +
> +	if (PAGE_SHIFT == 16)
> +		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
> +
> +	return pa;
> +}
> +
> +static inline u64 kvm_granule_shift(u32 level)
> +{
> +	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
> +	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
> +}
> +
> +static inline u64 kvm_granule_size(u32 level)
> +{
> +	return BIT(kvm_granule_shift(level));
> +}
> +
> +static inline bool kvm_level_supports_block_mapping(u32 level)
> +{
> +	/*
> +	 * Reject invalid block mappings and don't bother with 4TB mappings for
> +	 * 52-bit PAs.
> +	 */
> +	return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
> +}
> +
>  /**
>   * struct kvm_pgtable_mm_ops - Memory management callbacks.
>   * @zalloc_page: Allocate a single zeroed memory page.
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index 78f36bd5df6c..49d768b92997 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -11,16 +11,12 @@
>  #include <asm/kvm_pgtable.h>
>  #include <asm/stage2_pgtable.h>
>
> -#define KVM_PTE_VALID BIT(0)
>
>  #define KVM_PTE_TYPE BIT(1)
>  #define KVM_PTE_TYPE_BLOCK 0
>  #define KVM_PTE_TYPE_PAGE 1
>  #define KVM_PTE_TYPE_TABLE 1
>
> -#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
> -#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
> -
>  #define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
>
>  #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
> @@ -61,17 +57,6 @@ struct kvm_pgtable_walk_data {
>  	u64 end;
>  };
>
> -static u64 kvm_granule_shift(u32 level)
> -{
> -	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
> -	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
> -}
> -
> -static u64 kvm_granule_size(u32 level)
> -{
> -	return BIT(kvm_granule_shift(level));
> -}
> -
>  #define KVM_PHYS_INVALID (-1ULL)
>
>  static bool kvm_phys_is_valid(u64 phys)
> @@ -79,15 +64,6 @@ static bool kvm_phys_is_valid(u64 phys)
>  	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
>  }
>
> -static bool kvm_level_supports_block_mapping(u32 level)
> -{
> -	/*
> -	 * Reject invalid block mappings and don't bother with 4TB mappings for
> -	 * 52-bit PAs.
> -	 */
> -	return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
> -}
> -
>  static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
>  {
>  	u64 granule = kvm_granule_size(level);
> @@ -135,11 +111,6 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
>  	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
>  }
>
> -static bool kvm_pte_valid(kvm_pte_t pte)
> -{
> -	return pte & KVM_PTE_VALID;
> -}
> -
>  static bool kvm_pte_table(kvm_pte_t pte, u32 level)
>  {
>  	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
> @@ -151,16 +122,6 @@ static bool kvm_pte_table(kvm_pte_t pte, u32 level)
>  	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
>  }
>
> -static u64 kvm_pte_to_phys(kvm_pte_t pte)
> -{
> -	u64 pa = pte & KVM_PTE_ADDR_MASK;
> -
> -	if (PAGE_SHIFT == 16)
> -		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
> -
> -	return pa;
> -}
> -
>  static kvm_pte_t kvm_phys_to_pte(u64 pa)
>  {
>  	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
> --
> 2.32.0.605.g8dce9f2422-goog
>
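[Editorial note, not part of the thread] To make the motivation above concrete before the raw patch below, here is a minimal, hypothetical sketch of the kind of walker that can now be written outside pgtable.c once kvm_pte_valid() and kvm_granule_size() are visible from the header. The walker, its callback and the stats structure are invented for illustration; the sketch assumes the kvm_pgtable_walk()/struct kvm_pgtable_walker API as it stands at this point in the tree.

/*
 * Hypothetical example (not part of this patch): a walker defined outside
 * pgtable.c that counts valid leaf mappings in a range and sums the memory
 * they cover, using only the helpers this patch moves to the header.
 */
#include <asm/kvm_pgtable.h>

struct leaf_stats {
	u64 leaves;
	u64 bytes;
};

static int count_leaves_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct leaf_stats *stats = arg;

	/* Skip invalid entries; only account for valid leaf mappings. */
	if (!kvm_pte_valid(*ptep))
		return 0;

	stats->leaves++;
	stats->bytes += kvm_granule_size(level);

	return 0;
}

static int count_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size,
			struct leaf_stats *stats)
{
	struct kvm_pgtable_walker walker = {
		.cb	= count_leaves_walker,
		.arg	= stats,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

Nothing like this could have been built against the header before the patch, since kvm_pte_valid() and kvm_granule_size() were private to pgtable.c.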
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 082b9d65f40b..6938eac72c1f 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -25,6 +25,46 @@ static inline u64 kvm_get_parange(u64 mmfr0)
 
 typedef u64 kvm_pte_t;
 
+#define KVM_PTE_VALID BIT(0)
+
+#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
+#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
+
+static inline bool kvm_pte_valid(kvm_pte_t pte)
+{
+	return pte & KVM_PTE_VALID;
+}
+
+static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
+{
+	u64 pa = pte & KVM_PTE_ADDR_MASK;
+
+	if (PAGE_SHIFT == 16)
+		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
+
+	return pa;
+}
+
+static inline u64 kvm_granule_shift(u32 level)
+{
+	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
+	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
+}
+
+static inline u64 kvm_granule_size(u32 level)
+{
+	return BIT(kvm_granule_shift(level));
+}
+
+static inline bool kvm_level_supports_block_mapping(u32 level)
+{
+	/*
+	 * Reject invalid block mappings and don't bother with 4TB mappings for
+	 * 52-bit PAs.
+	 */
+	return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
+}
+
 /**
  * struct kvm_pgtable_mm_ops - Memory management callbacks.
  * @zalloc_page: Allocate a single zeroed memory page.
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 78f36bd5df6c..49d768b92997 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -11,16 +11,12 @@
 #include <asm/kvm_pgtable.h>
 #include <asm/stage2_pgtable.h>
 
-#define KVM_PTE_VALID BIT(0)
 
 #define KVM_PTE_TYPE BIT(1)
 #define KVM_PTE_TYPE_BLOCK 0
 #define KVM_PTE_TYPE_PAGE 1
 #define KVM_PTE_TYPE_TABLE 1
 
-#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
-#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
-
 #define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
 
 #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
@@ -61,17 +57,6 @@ struct kvm_pgtable_walk_data {
 	u64 end;
 };
 
-static u64 kvm_granule_shift(u32 level)
-{
-	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
-	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
-}
-
-static u64 kvm_granule_size(u32 level)
-{
-	return BIT(kvm_granule_shift(level));
-}
-
 #define KVM_PHYS_INVALID (-1ULL)
 
 static bool kvm_phys_is_valid(u64 phys)
@@ -79,15 +64,6 @@ static bool kvm_phys_is_valid(u64 phys)
 	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
 }
 
-static bool kvm_level_supports_block_mapping(u32 level)
-{
-	/*
-	 * Reject invalid block mappings and don't bother with 4TB mappings for
-	 * 52-bit PAs.
-	 */
-	return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
-}
-
 static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
 {
 	u64 granule = kvm_granule_size(level);
@@ -135,11 +111,6 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
 	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
 }
 
-static bool kvm_pte_valid(kvm_pte_t pte)
-{
-	return pte & KVM_PTE_VALID;
-}
-
 static bool kvm_pte_table(kvm_pte_t pte, u32 level)
 {
 	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
@@ -151,16 +122,6 @@ static bool kvm_pte_table(kvm_pte_t pte, u32 level)
 	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
 }
 
-static u64 kvm_pte_to_phys(kvm_pte_t pte)
-{
-	u64 pa = pte & KVM_PTE_ADDR_MASK;
-
-	if (PAGE_SHIFT == 16)
-		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
-
-	return pa;
-}
-
 static kvm_pte_t kvm_phys_to_pte(u64 pa)
 {
 	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
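[Editorial note, not part of the thread] The only non-obvious part of the helpers being moved is the 52-bit PA handling in kvm_pte_to_phys(): with 64kB pages (PAGE_SHIFT == 16), PA bits 51:48 are carried in PTE bits 15:12. The standalone userspace snippet below re-derives that arithmetic with local copies of the masks, purely to show the bit movement; the macro names here are stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for GENMASK() and the PTE address masks, illustration only. */
#define GENMASK_ULL(h, l)	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
#define PAGE_SHIFT		16			/* 64kB pages */
#define PTE_ADDR_MASK		GENMASK_ULL(47, PAGE_SHIFT)
#define PTE_ADDR_51_48		GENMASK_ULL(15, 12)

/* Mirrors kvm_pte_to_phys() for the PAGE_SHIFT == 16 case. */
static uint64_t pte_to_phys(uint64_t pte)
{
	uint64_t pa = pte & PTE_ADDR_MASK;

	pa |= ((pte & PTE_ADDR_51_48) >> 12) << 48;	/* PA[51:48] from PTE[15:12] */
	return pa;
}

int main(void)
{
	/* Made-up valid PTE whose output address is 0xf123456780000 (a 52-bit PA). */
	uint64_t pte = 0x123456780000ULL | (0xfULL << 12) | 1;

	printf("pa = %#llx\n", (unsigned long long)pte_to_phys(pte));
	return 0;
}

With 4kB or 16kB pages the PAGE_SHIFT == 16 branch in the real helper is constant-false and compiled out, since PTE bits 15:12 are then covered by KVM_PTE_ADDR_MASK itself.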
The KVM pgtable API exposes the kvm_pgtable_walk() function to allow
the definition of walkers outside of pgtable.c. However, it is not easy
to implement any of those walkers without some of the low-level helpers.
Move some of them to the header file to allow re-use from other places.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/include/asm/kvm_pgtable.h | 40 ++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/pgtable.c         | 39 ---------------------------
 2 files changed, 40 insertions(+), 39 deletions(-)
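[Editorial note, not part of the thread] Among the moved helpers, kvm_level_supports_block_mapping() encodes a small truth table: no block mappings at level 0, and level-1 blocks only with 4kB granules, per the comment about not bothering with 4TB mappings. The standalone restatement below (the function name is invented) just prints that table.

#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone restatement of the condition in kvm_level_supports_block_mapping():
 * no blocks at level 0, and level-1 blocks only when the granule is 4kB.
 */
static bool level_supports_block_mapping(unsigned int level, bool pages_4k)
{
	return !(level == 0 || (!pages_4k && level == 1));
}

int main(void)
{
	for (unsigned int level = 0; level < 4; level++)
		printf("level %u: 4k=%d, 16k/64k=%d\n", level,
		       level_supports_block_mapping(level, true),
		       level_supports_block_mapping(level, false));
	return 0;
}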