
[v6,08/16] KVM: TDX: Pin pages via get_page() right before ADD/AUG'ed to TDs

Message ID c8d8b880963cc6799b681f7905a956022e47f16f.1699368363.git.isaku.yamahata@intel.com (mailing list archive)
State New, archived
Series KVM TDX: TDP MMU: large page support

Commit Message

Isaku Yamahata Nov. 7, 2023, 3 p.m. UTC
From: Xiaoyao Li <xiaoyao.li@intel.com>

At kvm_faultin_pfn() time, KVM doesn't have the info regarding which page
level the gfn will be mapped at. Hence it doesn't know whether to pin a 4K
page or a 2M page.

Move the pinning logic for guest private pages to right before
TDH_MEM_PAGE_ADD/AUG(), since at that point the page level info is known.
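
For illustration, the pin side at ADD/AUG time can be a simple get_page()
loop over the 4K pages backing the mapping. A minimal sketch follows;
tdx_pin() is a hypothetical helper mirroring tdx_unpin(), not literal code
from this patch (gfn is passed through for symmetry and unused here):

	static void tdx_pin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
			    enum pg_level level)
	{
		int i;

		/* Pin each 4K page covered by a mapping at this level. */
		for (i = 0; i < KVM_PAGES_PER_HPAGE(level); i++)
			get_page(pfn_to_page(pfn + i));
	}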

Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
---
 arch/x86/kvm/vmx/tdx.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

Comments

Binbin Wu Nov. 20, 2023, 11:05 a.m. UTC | #1
On 11/7/2023 11:00 PM, isaku.yamahata@intel.com wrote:
> From: Xiaoyao Li <xiaoyao.li@intel.com>
>
> At kvm_faultin_pfn() time, KVM doesn't have the info regarding which page
> level the gfn will be mapped at. Hence it doesn't know whether to pin a 4K
> page or a 2M page.
>
> Move the pinning logic for guest private pages to right before
> TDH_MEM_PAGE_ADD/AUG(), since at that point the page level info is known.
This patch looks strange; the code has nothing to do with the shortlog.
It seems the change in this patch has already been covered by 06/16.

Something went wrong when formatting the patch?

>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
> ---
>   arch/x86/kvm/vmx/tdx.c | 15 ++++++++-------
>   1 file changed, 8 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index e4167f08b58b..7b81811eb404 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1454,7 +1454,8 @@ static void tdx_measure_page(struct kvm_tdx *kvm_tdx, hpa_t gpa, int size)
>   	}
>   }
>   
> -static void tdx_unpin(struct kvm *kvm, kvm_pfn_t pfn, int level)
> +static void tdx_unpin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
> +		      enum pg_level level)
>   {
>   	int i;
>   
> @@ -1476,7 +1477,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
>   
>   	err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, tdx_level, hpa, &out);
>   	if (unlikely(err == TDX_ERROR_SEPT_BUSY)) {
> -		tdx_unpin(kvm, pfn, level);
> +		tdx_unpin(kvm, gfn, pfn, level);
>   		return -EAGAIN;
>   	}
>   	if (unlikely(err == (TDX_EPT_ENTRY_STATE_INCORRECT | TDX_OPERAND_ID_RCX))) {
> @@ -1493,7 +1494,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
>   	}
>   	if (KVM_BUG_ON(err, kvm)) {
>   		pr_tdx_error(TDH_MEM_PAGE_AUG, err, &out);
> -		tdx_unpin(kvm, pfn, level);
> +		tdx_unpin(kvm, gfn, pfn, level);
>   		return -EIO;
>   	}
>   
> @@ -1529,7 +1530,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
>   	 * always uses vcpu 0's page table and protected by vcpu->mutex).
>   	 */
>   	if (KVM_BUG_ON(kvm_tdx->source_pa == INVALID_PAGE, kvm)) {
> -		tdx_unpin(kvm, pfn, level);
> +		tdx_unpin(kvm, gfn, pfn, level);
>   		return -EINVAL;
>   	}
>   
> @@ -1547,7 +1548,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
>   	} while (unlikely(err == TDX_ERROR_SEPT_BUSY));
>   	if (KVM_BUG_ON(err, kvm)) {
>   		pr_tdx_error(TDH_MEM_PAGE_ADD, err, &out);
> -		tdx_unpin(kvm, pfn, level);
> +		tdx_unpin(kvm, gfn, pfn, level);
>   		return -EIO;
>   	} else if (measure)
>   		tdx_measure_page(kvm_tdx, gpa, KVM_HPAGE_SIZE(level));
> @@ -1600,7 +1601,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
>   		err = tdx_reclaim_page(hpa, level);
>   		if (KVM_BUG_ON(err, kvm))
>   			return -EIO;
> -		tdx_unpin(kvm, pfn, level);
> +		tdx_unpin(kvm, gfn, pfn, level);
>   		return 0;
>   	}
>   
> @@ -1633,7 +1634,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
>   			r = -EIO;
>   		} else {
>   			tdx_clear_page(hpa, PAGE_SIZE);
> -			tdx_unpin(kvm, pfn + i, PG_LEVEL_4K);
> +			tdx_unpin(kvm, gfn + i, pfn + i, PG_LEVEL_4K);
>   		}
>   		hpa += PAGE_SIZE;
>   	}
Isaku Yamahata Nov. 21, 2023, 10:04 a.m. UTC | #2
On Mon, Nov 20, 2023 at 07:05:39PM +0800,
Binbin Wu <binbin.wu@linux.intel.com> wrote:

> 
> 
> On 11/7/2023 11:00 PM, isaku.yamahata@intel.com wrote:
> > From: Xiaoyao Li <xiaoyao.li@intel.com>
> > 
> > At kvm_faultin_pfn() time, KVM doesn't have the info regarding which page
> > level the gfn will be mapped at. Hence it doesn't know whether to pin a 4K
> > page or a 2M page.
> > 
> > Move the pinning logic for guest private pages to right before
> > TDH_MEM_PAGE_ADD/AUG(), since at that point the page level info is known.
> This patch looks strange; the code has nothing to do with the shortlog.
> It seems the change in this patch has already been covered by 06/16.
> 
> Something went wrong when formatting the patch?

Oh, right. This patch doesn't make sense anymore. I'll drop this patch.

Patch

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index e4167f08b58b..7b81811eb404 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1454,7 +1454,8 @@ static void tdx_measure_page(struct kvm_tdx *kvm_tdx, hpa_t gpa, int size)
 	}
 }
 
-static void tdx_unpin(struct kvm *kvm, kvm_pfn_t pfn, int level)
+static void tdx_unpin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+		      enum pg_level level)
 {
 	int i;
 
@@ -1476,7 +1477,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
 
 	err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, tdx_level, hpa, &out);
 	if (unlikely(err == TDX_ERROR_SEPT_BUSY)) {
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EAGAIN;
 	}
 	if (unlikely(err == (TDX_EPT_ENTRY_STATE_INCORRECT | TDX_OPERAND_ID_RCX))) {
@@ -1493,7 +1494,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
 	}
 	if (KVM_BUG_ON(err, kvm)) {
 		pr_tdx_error(TDH_MEM_PAGE_AUG, err, &out);
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EIO;
 	}
 
@@ -1529,7 +1530,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
 	 * always uses vcpu 0's page table and protected by vcpu->mutex).
 	 */
 	if (KVM_BUG_ON(kvm_tdx->source_pa == INVALID_PAGE, kvm)) {
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EINVAL;
 	}
 
@@ -1547,7 +1548,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
 	} while (unlikely(err == TDX_ERROR_SEPT_BUSY));
 	if (KVM_BUG_ON(err, kvm)) {
 		pr_tdx_error(TDH_MEM_PAGE_ADD, err, &out);
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EIO;
 	} else if (measure)
 		tdx_measure_page(kvm_tdx, gpa, KVM_HPAGE_SIZE(level));
@@ -1600,7 +1601,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
 		err = tdx_reclaim_page(hpa, level);
 		if (KVM_BUG_ON(err, kvm))
 			return -EIO;
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return 0;
 	}
 
@@ -1633,7 +1634,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
 			r = -EIO;
 		} else {
 			tdx_clear_page(hpa, PAGE_SIZE);
-			tdx_unpin(kvm, pfn + i, PG_LEVEL_4K);
+			tdx_unpin(kvm, gfn + i, pfn + i, PG_LEVEL_4K);
 		}
 		hpa += PAGE_SIZE;
 	}
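
For context, a plausible shape of tdx_unpin() with the new signature is the
mirror loop below. This is a sketch only: the function body is elided by the
diff context above, and the put_page() loop is inferred from the get_page()
pinning named in the shortlog, not taken from the patch itself (gfn goes
unused in the visible hunks, as the review thread notes):

	static void tdx_unpin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
			      enum pg_level level)
	{
		int i;

		/* Drop one page reference per 4K page of the mapping. */
		for (i = 0; i < KVM_PAGES_PER_HPAGE(level); i++)
			put_page(pfn_to_page(pfn + i));
	}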