[v1,02/13] KVM: x86: mmu: use for_each_shadow_entry_lockless()

Message ID 20170804131428.15844-3-david@redhat.com (mailing list archive)
State New, archived

Commit Message

David Hildenbrand Aug. 4, 2017, 1:14 p.m. UTC
Certainly better to read.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 arch/x86/kvm/mmu.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)
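
For reference, the helper the patch switches to already exists in
arch/x86/kvm/mmu.c; in kernels of this era it looks roughly like the
following (reproduced as a sketch, so allow for drift between versions):

	/*
	 * Walk the shadow page table for @_addr, reading each spte
	 * locklessly into @spte before the loop body runs, so callers
	 * no longer open-code mmu_spte_get_lockless() themselves.
	 */
	#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
		for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
		     shadow_walk_okay(&(_walker)) &&				\
			({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
		     __shadow_walk_next(&(_walker), spte))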

Comments

Paolo Bonzini Aug. 14, 2017, 10:21 a.m. UTC | #1
On 04/08/2017 15:14, David Hildenbrand wrote:
> Certainly better to read.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  arch/x86/kvm/mmu.c | 21 ++++++++-------------
>  1 file changed, 8 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 9ed26cc..3769613 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3596,8 +3596,8 @@ static bool
>  walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>  {
>  	struct kvm_shadow_walk_iterator iterator;
> -	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
> -	int root, leaf;
> +	u64 sptes[PT64_ROOT_LEVEL] = { 0 }, spte = 0ull;
> +	int level;
>  	bool reserved = false;
>  
>  	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> @@ -3605,14 +3605,8 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>  
>  	walk_shadow_page_lockless_begin(vcpu);
>  
> -	for (shadow_walk_init(&iterator, vcpu, addr),
> -		 leaf = root = iterator.level;
> -	     shadow_walk_okay(&iterator);
> -	     __shadow_walk_next(&iterator, spte)) {
> -		spte = mmu_spte_get_lockless(iterator.sptep);
> -
> -		sptes[leaf - 1] = spte;
> -		leaf--;
> +	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
> +		sptes[iterator.level - 1] = spte;

If you leave a

	leaf = iterator.level;

(note I'm not subtracting 1 here; maybe s/leaf/last/ could be a good
idea, too)

>  		if (!is_shadow_present_pte(spte))
>  			break;
> @@ -3626,10 +3620,11 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>  	if (reserved) {
>  		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
>  		       __func__, addr);
> -		while (root > leaf) {
> +		for (level = PT64_ROOT_LEVEL; level > 0; level--) {
> +			if (!sptes[level - 1])
> +				continue;

then here you might use

	for (level = vcpu->arch.mmu.shadow_root_level;
	     level >= leaf; level--)

instead?

Paolo
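
Taken together, Paolo's two suggestions amount to something like the
sketch below (an illustration of the proposal, keeping the leaf variable
the patch removed; not code from the tree). Whether shadow_root_level is
the right upper bound is exactly what David questions next:

	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		leaf = iterator.level;		/* last level visited */
		sptes[leaf - 1] = spte;

		if (!is_shadow_present_pte(spte))
			break;

		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
						    iterator.level);
	}

	walk_shadow_page_lockless_end(vcpu);

	if (reserved) {
		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
		       __func__, addr);
		/* assumes the walk actually started at shadow_root_level */
		for (level = vcpu->arch.mmu.shadow_root_level;
		     level >= leaf; level--)
			pr_err("------ spte 0x%llx level %d.\n",
			       sptes[level - 1], level);
	}
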
David Hildenbrand Aug. 17, 2017, 10:12 a.m. UTC | #2
On 14.08.2017 12:21, Paolo Bonzini wrote:
> On 04/08/2017 15:14, David Hildenbrand wrote:
>> Certainly better to read.
>>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>  arch/x86/kvm/mmu.c | 21 ++++++++-------------
>>  1 file changed, 8 insertions(+), 13 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index 9ed26cc..3769613 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -3596,8 +3596,8 @@ static bool
>>  walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>>  {
>>  	struct kvm_shadow_walk_iterator iterator;
>> -	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
>> -	int root, leaf;
>> +	u64 sptes[PT64_ROOT_LEVEL] = { 0 }, spte = 0ull;
>> +	int level;
>>  	bool reserved = false;
>>  
>>  	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
>> @@ -3605,14 +3605,8 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>>  
>>  	walk_shadow_page_lockless_begin(vcpu);
>>  
>> -	for (shadow_walk_init(&iterator, vcpu, addr),
>> -		 leaf = root = iterator.level;
>> -	     shadow_walk_okay(&iterator);
>> -	     __shadow_walk_next(&iterator, spte)) {
>> -		spte = mmu_spte_get_lockless(iterator.sptep);
>> -
>> -		sptes[leaf - 1] = spte;
>> -		leaf--;
>> +	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
>> +		sptes[iterator.level - 1] = spte;
> 
> If you leave a
> 
> 	leaf = iterator.level;
> 
> (note I'm not subtracting 1 here; maybe s/leaf/last/ could be a good
> idea, too)
> 
>>  		if (!is_shadow_present_pte(spte))
>>  			break;
>> @@ -3626,10 +3620,11 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>>  	if (reserved) {
>>  		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
>>  		       __func__, addr);
>> -		while (root > leaf) {
>> +		for (level = PT64_ROOT_LEVEL; level > 0; level--) {
>> +			if (!sptes[level - 1])
>> +				continue;
> 
> then here you might use
> 
> 	for (level = vcpu->arch.mmu.shadow_root_level;
> 	     level >= leaf; level--)

I also had that in mind, but shadow_walk_init() tells me that
using vcpu->arch.mmu.shadow_root_level might not be correct?

Alternative 1: get rid of this debug output completely
Alternative 2: add dump_shadow_entries(struct kvm_vcpu *vcpu, u64 addr)
(we might dump entries that are now different on the second walk, but I
highly doubt that this is relevant)

> 
> instead?
> 
> Paolo
>
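
For reference, shadow_walk_init() in kernels of this era looks roughly
as follows (again a sketch, modulo version drift); note that it can
start the walk below vcpu->arch.mmu.shadow_root_level, which is the
source of the doubt:

	static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
				     struct kvm_vcpu *vcpu, u64 addr)
	{
		iterator->addr = addr;
		iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
		iterator->level = vcpu->arch.mmu.shadow_root_level;

		/* 64-bit host, 32-bit non-direct guest: start one level down */
		if (iterator->level == PT64_ROOT_LEVEL &&
		    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
		    !vcpu->arch.mmu.direct_map)
			--iterator->level;

		if (iterator->level == PT32E_ROOT_LEVEL) {
			/* PAE: descend into one of the four pae_root entries */
			iterator->shadow_addr
				= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
			iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
			--iterator->level;
			if (!iterator->shadow_addr)
				iterator->level = 0;
		}
	}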
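
David's Alternative 2 was never posted as code. A minimal sketch of such
a helper (hypothetical, never merged) could reuse the lockless walk,
with the caveat David notes that a second walk may observe different
entries:

	/* Hypothetical helper per David's Alternative 2; never merged. */
	static void dump_shadow_entries(struct kvm_vcpu *vcpu, u64 addr)
	{
		struct kvm_shadow_walk_iterator iterator;
		u64 spte;

		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return;

		walk_shadow_page_lockless_begin(vcpu);
		for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
			pr_err("------ spte 0x%llx level %d.\n",
			       spte, iterator.level);
			if (!is_shadow_present_pte(spte))
				break;
		}
		walk_shadow_page_lockless_end(vcpu);
	}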
Paolo Bonzini Aug. 17, 2017, 10:23 a.m. UTC | #3
On 17/08/2017 12:12, David Hildenbrand wrote:
> On 14.08.2017 12:21, Paolo Bonzini wrote:
>> On 04/08/2017 15:14, David Hildenbrand wrote:
>>> Certainly better to read.
>>>
>>> Signed-off-by: David Hildenbrand <david@redhat.com>
>>> ---
>>>  arch/x86/kvm/mmu.c | 21 ++++++++-------------
>>>  1 file changed, 8 insertions(+), 13 deletions(-)
>>>
>>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>>> index 9ed26cc..3769613 100644
>>> --- a/arch/x86/kvm/mmu.c
>>> +++ b/arch/x86/kvm/mmu.c
>>> @@ -3596,8 +3596,8 @@ static bool
>>>  walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>>>  {
>>>  	struct kvm_shadow_walk_iterator iterator;
>>> -	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
>>> -	int root, leaf;
>>> +	u64 sptes[PT64_ROOT_LEVEL] = { 0 }, spte = 0ull;
>>> +	int level;
>>>  	bool reserved = false;
>>>  
>>>  	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
>>> @@ -3605,14 +3605,8 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>>>  
>>>  	walk_shadow_page_lockless_begin(vcpu);
>>>  
>>> -	for (shadow_walk_init(&iterator, vcpu, addr),
>>> -		 leaf = root = iterator.level;
>>> -	     shadow_walk_okay(&iterator);
>>> -	     __shadow_walk_next(&iterator, spte)) {
>>> -		spte = mmu_spte_get_lockless(iterator.sptep);
>>> -
>>> -		sptes[leaf - 1] = spte;
>>> -		leaf--;
>>> +	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
>>> +		sptes[iterator.level - 1] = spte;
>>
>> If you leave a
>>
>> 	leaf = iterator.level;
>>
>> (note I'm not subtracting 1 here; maybe s/leaf/last/ could be a good
>> idea, too)
>>
>>>  		if (!is_shadow_present_pte(spte))
>>>  			break;
>>> @@ -3626,10 +3620,11 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>>>  	if (reserved) {
>>>  		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
>>>  		       __func__, addr);
>>> -		while (root > leaf) {
>>> +		for (level = PT64_ROOT_LEVEL; level > 0; level--) {
>>> +			if (!sptes[level - 1])
>>> +				continue;
>>
>> then here you might use
>>
>> 	for (level = vcpu->arch.mmu.shadow_root_level;
>> 	     level >= leaf; level--)
> 
> I also had that in mind, but shadow_walk_init() tells me that
> using vcpu->arch.mmu.shadow_root_level might not be correct?
> 
> Alternative 1: get rid of this debug output completely
> Alternative 2: add dump_shadow_entries(struct kvm_vcpu *vcpu, u64 addr)
> (we might dump entries that are now different on the second walk, but I
> highly doubt that this is relevant)

Hmm, I might just ask you to drop this patch, after all.

Paolo
David Hildenbrand Aug. 17, 2017, 10:39 a.m. UTC | #4
>>> then here you might use
>>>
>>> 	for (level = vcpu->arch.mmu.shadow_root_level;
>>> 	     level >= leaf; level--)
>>
>> I also had that in mind, but shadow_walk_init() tells me that
>> using vcpu->arch.mmu.shadow_root_level might not be correct?
>>
>> Alternative 1: get rid of this debug output completely
>> Alternative 2: add dump_shadow_entries(struct kvm_vcpu *vcpu, u64 addr)
>> (we might dump entries that are now different on the second walk, but I
>> highly doubt that this is relevant)
> 
> Hmm, I might just ask you to drop this patch, after all.

Sure, I just found for_each_shadow_entry_lockless() to be a better fit
here. I'll drop it.

> 
> Paolo
>

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9ed26cc..3769613 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3596,8 +3596,8 @@  static bool
 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
-	int root, leaf;
+	u64 sptes[PT64_ROOT_LEVEL] = { 0 }, spte = 0ull;
+	int level;
 	bool reserved = false;
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
@@ -3605,14 +3605,8 @@  walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 
 	walk_shadow_page_lockless_begin(vcpu);
 
-	for (shadow_walk_init(&iterator, vcpu, addr),
-		 leaf = root = iterator.level;
-	     shadow_walk_okay(&iterator);
-	     __shadow_walk_next(&iterator, spte)) {
-		spte = mmu_spte_get_lockless(iterator.sptep);
-
-		sptes[leaf - 1] = spte;
-		leaf--;
+	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+		sptes[iterator.level - 1] = spte;
 
 		if (!is_shadow_present_pte(spte))
 			break;
@@ -3626,10 +3620,11 @@  walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	if (reserved) {
 		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
 		       __func__, addr);
-		while (root > leaf) {
+		for (level = PT64_ROOT_LEVEL; level > 0; level--) {
+			if (!sptes[level - 1])
+				continue;
 			pr_err("------ spte 0x%llx level %d.\n",
-			       sptes[root - 1], root);
-			root--;
+			       sptes[level - 1], level);
 		}
 	}
 exit: