[1/2] KVM: X86/MMU: Add sp_has_gptes()

Message ID 20220420131204.2850-2-jiangshanlai@gmail.com
State New, archived
Series KVM: X86/MMU: Fix problem for shadowing 5-level NPT for 4-level NPT L1 guest

Commit Message

Lai Jiangshan April 20, 2022, 1:12 p.m. UTC
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

Add sp_has_gptes(), which is currently equivalent to !sp->role.direct.

A shadow page that has gptes needs to be write-protected, accounted, and
handled in kvm_mmu_pte_write().

Use it in those places instead of !sp->role.direct, and rename
for_each_gfn_indirect_valid_sp accordingly.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
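
A note on what the helper expresses: a direct shadow page maps guest
memory without shadowing any guest page table, so it has no gptes to
write-protect, account, or resync on guest PTE writes; an indirect
shadow page shadows a guest page table and therefore has gptes. A
minimal annotated sketch of the new helper (the comments are
illustrative, not part of the patch):

static bool sp_has_gptes(struct kvm_mmu_page *sp)
{
	/*
	 * A direct sp maps guest memory directly (e.g. TDP roots or
	 * nonpaging mode); no guest page table backs it, so there is
	 * nothing to write-protect or account.
	 */
	if (sp->role.direct)
		return false;

	/* Today, every non-direct sp shadows a guest page table. */
	return true;
}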

Comments

Paolo Bonzini April 20, 2022, 8:48 p.m. UTC | #1
On 4/20/22 15:12, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> Add sp_has_gptes(), which is currently equivalent to !sp->role.direct.
> 
> A shadow page that has gptes needs to be write-protected, accounted, and
> handled in kvm_mmu_pte_write().
> 
> Use it in those places instead of !sp->role.direct, and rename
> for_each_gfn_indirect_valid_sp accordingly.
> 
> -#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
> +#define for_each_gfn_valid_sp_has_gptes(_kvm, _sp, _gfn)		\

Small nit: for_each_gfn_valid_sp_with_gptes might be a little more
grammatical (not by much).

>   	for_each_valid_sp(_kvm, _sp,					\
>   	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
> -		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
> +		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
>   
>   static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>   			 struct list_head *invalid_list)
> @@ -2112,7 +2120,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>   	sp->gfn = gfn;
>   	sp->role = role;
>   	hlist_add_head(&sp->hash_link, sp_list);
> -	if (!direct) {
> +	if (sp_has_gptes(sp)) {
>   		account_shadowed(vcpu->kvm, sp);
>   		if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
>   			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
> @@ -2321,7 +2329,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
>   	/* Zapping children means active_mmu_pages has become unstable. */
>   	list_unstable = *nr_zapped;
>   
> -	if (!sp->role.invalid && !sp->role.direct)
> +	if (!sp->role.invalid && sp_has_gptes(sp))
>   		unaccount_shadowed(kvm, sp);
>   
>   	if (sp->unsync)
> @@ -2501,7 +2509,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
>   	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
>   	r = 0;
>   	write_lock(&kvm->mmu_lock);
> -	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
> +	for_each_gfn_valid_sp_has_gptes(kvm, sp, gfn) {
>   		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
>   			 sp->role.word);
>   		r = 1;
> @@ -2563,7 +2571,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
>   	 * that case, KVM must complete emulation of the guest TLB flush before
>   	 * allowing shadow pages to become unsync (writable by the guest).
>   	 */
> -	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
> +	for_each_gfn_valid_sp_has_gptes(kvm, sp, gfn) {
>   		if (!can_unsync)
>   			return -EPERM;
>   
> @@ -5311,7 +5319,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>   
>   	++vcpu->kvm->stat.mmu_pte_write;
>   
> -	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
> +	for_each_gfn_valid_sp_has_gptes(vcpu->kvm, sp, gfn) {
>   		if (detect_write_misaligned(sp, gpa, bytes) ||
>   		      detect_write_flooding(sp)) {
>   			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1361eb4599b4..1bdff55218ef 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1856,15 +1856,23 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
+static bool sp_has_gptes(struct kvm_mmu_page *sp)
+{
+	if (sp->role.direct)
+		return false;
+
+	return true;
+}
+
 #define for_each_valid_sp(_kvm, _sp, _list)				\
 	hlist_for_each_entry(_sp, _list, hash_link)			\
 		if (is_obsolete_sp((_kvm), (_sp))) {			\
 		} else
 
-#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
+#define for_each_gfn_valid_sp_has_gptes(_kvm, _sp, _gfn)		\
 	for_each_valid_sp(_kvm, _sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
-		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
+		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
@@ -2112,7 +2120,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, sp_list);
-	if (!direct) {
+	if (sp_has_gptes(sp)) {
 		account_shadowed(vcpu->kvm, sp);
 		if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
@@ -2321,7 +2329,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 	/* Zapping children means active_mmu_pages has become unstable. */
 	list_unstable = *nr_zapped;
 
-	if (!sp->role.invalid && !sp->role.direct)
+	if (!sp->role.invalid && sp_has_gptes(sp))
 		unaccount_shadowed(kvm, sp);
 
 	if (sp->unsync)
@@ -2501,7 +2509,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	write_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+	for_each_gfn_valid_sp_has_gptes(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2563,7 +2571,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 	 * that case, KVM must complete emulation of the guest TLB flush before
 	 * allowing shadow pages to become unsync (writable by the guest).
 	 */
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
+	for_each_gfn_valid_sp_has_gptes(kvm, sp, gfn) {
 		if (!can_unsync)
 			return -EPERM;
 
@@ -5311,7 +5319,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	++vcpu->kvm->stat.mmu_pte_write;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+	for_each_gfn_valid_sp_has_gptes(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
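
Why an if/else helper rather than a one-line "return !sp->role.direct;"?
The series title gives the hint: patch 2/2 needs a shadow page that is
not direct yet shadows no guest page table (the extra 5-level root used
to run a 4-level NPT L1 guest), and the helper gives that distinction a
single home. A hedged sketch of the direction, assuming a new role bit
roughly along the lines of patch 2/2 (the "passthrough" name here is
illustrative, not quoted from that patch):

static bool sp_has_gptes(struct kvm_mmu_page *sp)
{
	if (sp->role.direct)
		return false;

	/*
	 * Assumed follow-up: a passthrough sp points at a lower shadow
	 * table instead of translating a guest page table, so it is
	 * indirect but has no gptes and must not be write-protected
	 * or accounted.
	 */
	if (sp->role.passthrough)
		return false;

	return true;
}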