[v2,2/5] kvm: x86: mmu: Rename spte_is_locklessly_modifiable()

Message ID 1478646030-101103-3-git-send-email-junaids@google.com (mailing list archive)
State New, archived

Commit Message

Junaid Shahid Nov. 8, 2016, 11 p.m. UTC
This change renames spte_is_locklessly_modifiable() to
spte_can_locklessly_be_made_writable() to distinguish it from other
forms of lockless modification. The full set of lockless modifications
is covered by spte_has_volatile_bits().

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/x86/kvm/mmu.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
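
As context for the diff below: the renamed helper checks only the two
software-writable bits, and it is the first of the conditions tested by
spte_has_volatile_bits(). A minimal standalone sketch of that
relationship (the bit positions here are illustrative, not the kernel's
actual mask definitions, and the accessed/dirty checks in
spte_has_volatile_bits() are elided):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	/* Illustrative bit positions only; the kernel's actual mask
	 * definitions live elsewhere in mmu.c. */
	#define SPTE_HOST_WRITEABLE (1ULL << 9)
	#define SPTE_MMU_WRITEABLE  (1ULL << 10)

	/* The renamed helper: true only when both software-writable
	 * bits are set, i.e. the fast page fault path may mark the
	 * SPTE writable without holding mmu_lock. */
	static bool spte_can_locklessly_be_made_writable(u64 spte)
	{
		return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		       (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
	}

	/* Sketch: lockless writability is one of several reasons an
	 * SPTE may change outside mmu_lock; the accessed/dirty bit
	 * checks are elided here. */
	static bool spte_has_volatile_bits(u64 spte)
	{
		if (spte_can_locklessly_be_made_writable(spte))
			return true;
		/* ... accessed/dirty bit checks elided ... */
		return false;
	}

	int main(void)
	{
		u64 spte = SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE;
		printf("lockless-writable: %d, volatile: %d\n",
		       spte_can_locklessly_be_made_writable(spte),
		       spte_has_volatile_bits(spte));
		return 0;
	}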

Comments

Paolo Bonzini Nov. 21, 2016, 1:07 p.m. UTC | #1
On 09/11/2016 00:00, Junaid Shahid wrote:
> This change renames spte_is_locklessly_modifiable() to
> spte_can_locklessly_be_made_writable() to distinguish it from other
> forms of lockless modification. The full set of lockless modifications
> is covered by spte_has_volatile_bits().
> 
> Signed-off-by: Junaid Shahid <junaids@google.com>
> ---
>  arch/x86/kvm/mmu.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index d9c7e98..e580134 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -473,7 +473,7 @@ retry:
>  }
>  #endif
>  
> -static bool spte_is_locklessly_modifiable(u64 spte)
> +static bool spte_can_locklessly_be_made_writable(u64 spte)
>  {
>  	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
>  		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
> @@ -487,7 +487,7 @@ static bool spte_has_volatile_bits(u64 spte)
>  	 * also, it can help us to get a stable is_writable_pte()
>  	 * to ensure tlb flush is not missed.
>  	 */
> -	if (spte_is_locklessly_modifiable(spte))
> +	if (spte_can_locklessly_be_made_writable(spte))
>  		return true;
>  
>  	if (!shadow_accessed_mask)
> @@ -556,7 +556,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
>  	 * we always atomically update it, see the comments in
>  	 * spte_has_volatile_bits().
>  	 */
> -	if (spte_is_locklessly_modifiable(old_spte) &&
> +	if (spte_can_locklessly_be_made_writable(old_spte) &&
>  	      !is_writable_pte(new_spte))
>  		ret = true;
>  
> @@ -1212,7 +1212,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
>  	u64 spte = *sptep;
>  
>  	if (!is_writable_pte(spte) &&
> -	      !(pt_protect && spte_is_locklessly_modifiable(spte)))
> +	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
>  		return false;
>  
>  	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
> @@ -2973,7 +2973,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
>  	 * Currently, to simplify the code, only the spte write-protected
>  	 * by dirty-log can be fast fixed.
>  	 */
> -	if (!spte_is_locklessly_modifiable(spte))
> +	if (!spte_can_locklessly_be_made_writable(spte))
>  		goto exit;
>  
>  	/*
> 

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7e98..e580134 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -473,7 +473,7 @@ retry:
 }
 #endif
 
-static bool spte_is_locklessly_modifiable(u64 spte)
+static bool spte_can_locklessly_be_made_writable(u64 spte)
 {
 	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
 		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
@@ -487,7 +487,7 @@ static bool spte_has_volatile_bits(u64 spte)
 	 * also, it can help us to get a stable is_writable_pte()
 	 * to ensure tlb flush is not missed.
 	 */
-	if (spte_is_locklessly_modifiable(spte))
+	if (spte_can_locklessly_be_made_writable(spte))
 		return true;
 
 	if (!shadow_accessed_mask)
@@ -556,7 +556,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	 * we always atomically update it, see the comments in
 	 * spte_has_volatile_bits().
 	 */
-	if (spte_is_locklessly_modifiable(old_spte) &&
+	if (spte_can_locklessly_be_made_writable(old_spte) &&
 	      !is_writable_pte(new_spte))
 		ret = true;
 
@@ -1212,7 +1212,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
 	u64 spte = *sptep;
 
 	if (!is_writable_pte(spte) &&
-	      !(pt_protect && spte_is_locklessly_modifiable(spte)))
+	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
 		return false;
 
 	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
@@ -2973,7 +2973,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	 * Currently, to simplify the code, only the spte write-protected
 	 * by dirty-log can be fast fixed.
 	 */
-	if (!spte_is_locklessly_modifiable(spte))
+	if (!spte_can_locklessly_be_made_writable(spte))
 		goto exit;
 
 	/*