[12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

Message ID 1432983566-15773-13-git-send-email-guangrong.xiao@linux.intel.com
State New

Commit Message

Xiao Guangrong May 30, 2015, 10:59 a.m. UTC
It walks all MTRRs and gets all the memory cache type settings for the
specified range; it also checks whether the range is fully covered by MTRRs.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 183 insertions(+)

Comments

Paolo Bonzini June 1, 2015, 9:33 a.m. UTC | #1
On 30/05/2015 12:59, Xiao Guangrong wrote:
> +struct mtrr_looker {
> +	/* input fields. */
> +	struct kvm_mtrr *mtrr_state;
> +	u64 start;
> +	u64 end;

s/looker/iter/ or s/looker/lookup/

> +static void mtrr_lookup_start(struct mtrr_looker *looker)
> +{
> +	looker->mem_type = -1;
> +
> +	if (!looker->mtrr_state->mtrr_enabled) {
> +		looker->partial_map = true;
> +		return;
> +	}
> +
> +	if (!mtrr_lookup_fixed_start(looker))
> +		mtrr_lookup_var_start(looker);
> +}
> +

Separating mtrr_lookup_start and mtrr_lookup_init is weird.

There are common parts of mtrr_lookup_*_start and mtrr_lookup_*_next.  
For example this:

> +	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
> +	looker->start = fixed_mtrr_range_end_addr(seg, index);
> +	return true;

in mtrr_lookup_fixed_start is the same as this:

> 
> +	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
> +
> +	/* switch to next segment. */
> +	if (end >= eseg->end) {
> +		looker->seg++;
> +		looker->index = 0;
> +
> +		/* have looked up for all fixed MTRRs. */
> +		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
> +			return mtrr_lookup_var_start(looker);
> +
> +		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
> +	}
> +
> +	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
> +	looker->start = end;

in mtrr_lookup_fixed_next.  Can you try to make them more common?

Basically you should have

+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
+	     !mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))

Paolo
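
For reference, a minimal sketch of the folded-together initializer this
suggests, reusing the helpers and fields from the v1 patch (a hypothetical
shape, not the final v2 code):

static void mtrr_lookup_init(struct mtrr_looker *looker,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	/* record the lookup range and start with no cached memory type. */
	looker->mtrr_state = mtrr_state;
	looker->start = start;
	looker->end = end;
	looker->mem_type = -1;

	/* MTRRs disabled: nothing can cover [start, end). */
	if (!mtrr_state->mtrr_enabled) {
		looker->partial_map = true;
		return;
	}

	/* try the fixed MTRRs first, then fall back to variable ones. */
	if (!mtrr_lookup_fixed_start(looker))
		mtrr_lookup_var_start(looker);
}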
Paolo Bonzini June 1, 2015, 2:26 p.m. UTC | #2
On 01/06/2015 11:33, Paolo Bonzini wrote:
>> +	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
>> +	looker->start = fixed_mtrr_range_end_addr(seg, index);
>> +	return true;
> in mtrr_lookup_fixed_start is the same as this:
> 
>> 
>> +	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
>> +
>> +	/* switch to next segment. */
>> +	if (end >= eseg->end) {
>> +		looker->seg++;
>> +		looker->index = 0;
>> +
>> +		/* have looked up for all fixed MTRRs. */
>> +		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
>> +			return mtrr_lookup_var_start(looker);
>> +
>> +		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
>> +	}
>> +
>> +	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
>> +	looker->start = end;
> in mtrr_lookup_fixed_next.  Can you try to make them more common?
> 
> Basically you should have
> 
> +#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
> +	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
> +	     !mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))

... where the above code I quoted would be part of mtrr_lookup_end.

Paolo
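
A minimal sketch of the termination predicate this implies, under the v1
convention that mem_type == -1 marks an exhausted walk (the name
mtrr_lookup_end comes from the suggested macro; the body here is an
assumption):

static bool mtrr_lookup_end(struct mtrr_looker *looker)
{
	/* the walk is over once no memory type is cached in the iterator. */
	return looker->mem_type == -1;
}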
Xiao Guangrong June 3, 2015, 2:40 a.m. UTC | #3
On 06/01/2015 05:33 PM, Paolo Bonzini wrote:
>
>
> On 30/05/2015 12:59, Xiao Guangrong wrote:
>> +struct mtrr_looker {
>> +	/* input fields. */
>> +	struct kvm_mtrr *mtrr_state;
>> +	u64 start;
>> +	u64 end;
>
> s/looker/iter/ or s/looker/lookup/

Sounds good to me.

>
>> +static void mtrr_lookup_start(struct mtrr_looker *looker)
>> +{
>> +	looker->mem_type = -1;
>> +
>> +	if (!looker->mtrr_state->mtrr_enabled) {
>> +		looker->partial_map = true;
>> +		return;
>> +	}
>> +
>> +	if (!mtrr_lookup_fixed_start(looker))
>> +		mtrr_lookup_var_start(looker);
>> +}
>> +
>
> Separating mtrr_lookup_start and mtrr_lookup_init is weird.

Agreed, will fold mtrr_lookup_start() into mtrr_lookup_init().

>
> There are common parts of mtrr_lookup_*_start and mtrr_lookup_*_next.
> For example this:
>
>> +	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
>> +	looker->start = fixed_mtrr_range_end_addr(seg, index);
>> +	return true;
>
> in mtrr_lookup_fixed_start is the same as this:
>
>>
>> +	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
>> +
>> +	/* switch to next segment. */
>> +	if (end >= eseg->end) {
>> +		looker->seg++;
>> +		looker->index = 0;
>> +
>> +		/* have looked up for all fixed MTRRs. */
>> +		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
>> +			return mtrr_lookup_var_start(looker);
>> +
>> +		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
>> +	}
>> +
>> +	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
>> +	looker->start = end;
>
> in mtrr_lookup_fixed_next.  Can you try to make them more common?

Yes, I will check them carefully and introduce common functions to do
it.

>
> Basically you should have
>
> +#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
> +	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
> +	     !mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))

Good idea, will do it in v2.

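One possible shape for such a common function, factoring out the duplicated
"record the current fixed range" step quoted above (a hypothetical sketch;
the function name is invented here):

static void mtrr_lookup_fixed_cache_cur(struct mtrr_looker *looker)
{
	/* cache the memory type of the current fixed range and advance
	 * the lookup start to the end of that range. */
	looker->mem_type = looker->mtrr_state->fixed_ranges[looker->index];
	looker->start = fixed_mtrr_range_end_addr(looker->seg, looker->index);
}
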
Xiao Guangrong June 3, 2015, 2:57 a.m. UTC | #4
On 06/01/2015 10:26 PM, Paolo Bonzini wrote:
>
>
> On 01/06/2015 11:33, Paolo Bonzini wrote:
>>> +	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
>>> +	looker->start = fixed_mtrr_range_end_addr(seg, index);
>>> +	return true;
>> in mtrr_lookup_fixed_start is the same as this:
>>
>>> 
>>> +	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
>>> +
>>> +	/* switch to next segment. */
>>> +	if (end >= eseg->end) {
>>> +		looker->seg++;
>>> +		looker->index = 0;
>>> +
>>> +		/* have looked up for all fixed MTRRs. */
>>> +		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
>>> +			return mtrr_lookup_var_start(looker);
>>> +
>>> +		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
>>> +	}
>>> +
>>> +	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
>>> +	looker->start = end;
>> in mtrr_lookup_fixed_next.  Can you try to make them more common?
>>
>> Basically you should have
>>
>> +#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
>> +	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
>> +	     !mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))
>
> ... where the above code I quoted would be part of mtrr_lookup_end.

Okay, will do. :)
David Matlack June 9, 2015, 12:36 a.m. UTC | #5
On Sat, May 30, 2015 at 3:59 AM, Xiao Guangrong
<guangrong.xiao@linux.intel.com> wrote:
> It walks all MTRRs and gets all the memory cache type settings for the
> specified range; it also checks whether the range is fully covered by MTRRs.
>
> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
> ---
>  arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 183 insertions(+)
>
> diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
> index e59d138..35f86303 100644
> --- a/arch/x86/kvm/mtrr.c
> +++ b/arch/x86/kvm/mtrr.c
> @@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
>         INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
>  }
>
> +struct mtrr_looker {
> +       /* input fields. */
> +       struct kvm_mtrr *mtrr_state;
> +       u64 start;
> +       u64 end;
> +
> +       /* output fields. */
> +       int mem_type;
> +       /* [start, end) is fully covered in MTRRs? */

s/fully/not fully/ ?

> +       bool partial_map;
> +
> +       /* private fields. */
> +       union {
> +               /* used for fixed MTRRs. */
> +               struct {
> +                       int index;
> +                       int seg;
> +               };
> +
> +               /* used for var MTRRs. */
> +               struct {
> +                       struct kvm_mtrr_range *range;
> +                       /* max address has been covered in var MTRRs. */
> +                       u64 start_max;
> +               };
> +       };
> +
> +       bool fixed;
> +};
> +
> +static void mtrr_lookup_init(struct mtrr_looker *looker,
> +                            struct kvm_mtrr *mtrr_state, u64 start, u64 end)
> +{
> +       looker->mtrr_state = mtrr_state;
> +       looker->start = start;
> +       looker->end = end;
> +}
> +
> +static u64 fixed_mtrr_range_end_addr(int seg, int index)
> +{
> +       struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
> +
> +        return mtrr_seg->start + mtrr_seg->range_size * index;

Should be (index + 1)?

> +}
> +
> +static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
> +{
> +       int seg, index;
> +
> +       if (!looker->mtrr_state->fixed_mtrr_enabled)
> +               return false;
> +
> +       seg = fixed_mtrr_addr_to_seg(looker->start);
> +       if (seg < 0)
> +               return false;
> +
> +       looker->fixed = true;
> +       index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
> +       looker->index = index;
> +       looker->seg = seg;
> +       looker->mem_type = looker->mtrr_state->fixed_ranges[index];
> +       looker->start = fixed_mtrr_range_end_addr(seg, index);
> +       return true;
> +}
> +
> +static bool match_var_range(struct mtrr_looker *looker,
> +                           struct kvm_mtrr_range *range)
> +{
> +       u64 start, end;
> +
> +       var_mtrr_range(range, &start, &end);
> +       if (!(start >= looker->end || end <= looker->start)) {
> +               looker->range = range;
> +               looker->mem_type = range->base & 0xff;
> +
> +               /*
> +                * the function is called when we do kvm_mtrr.head walking
> +                * that means range has the minimum base address interleaves
> +                * with [looker->start_max, looker->end).
> +                */

I'm having trouble understanding this comment. I think this is what you
are trying to say:

  this function is called when we do kvm_mtrr.head walking. range has the
  minimum base address which interleaves [looker->start_max, looker->end).

Let me know if I parsed it wrong.

> +               looker->partial_map |= looker->start_max < start;
> +
> +               /* update the max address has been covered. */
> +               looker->start_max = max(looker->start_max, end);
> +               return true;
> +       }
> +
> +       return false;
> +}
> +
> +static void mtrr_lookup_var_start(struct mtrr_looker *looker)
> +{
> +       struct kvm_mtrr *mtrr_state = looker->mtrr_state;
> +       struct kvm_mtrr_range *range;
> +
> +       looker->fixed = false;
> +       looker->partial_map = false;
> +       looker->start_max = looker->start;
> +       looker->mem_type = -1;
> +
> +       list_for_each_entry(range, &mtrr_state->head, node)
> +               if (match_var_range(looker, range))
> +                       return;
> +
> +       looker->partial_map = true;
> +}
> +
> +static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
> +{
> +       struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
> +       struct kvm_mtrr *mtrr_state = looker->mtrr_state;
> +       u64 end;
> +
> +       if (looker->start >= looker->end) {
> +               looker->mem_type = -1;
> +               looker->partial_map = false;
> +               return;
> +       }
> +
> +       WARN_ON(!looker->fixed);
> +
> +       looker->index++;
> +       end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
> +
> +       /* switch to next segment. */
> +       if (end >= eseg->end) {
> +               looker->seg++;
> +               looker->index = 0;
> +
> +               /* have looked up for all fixed MTRRs. */
> +               if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
> +                       return mtrr_lookup_var_start(looker);
> +
> +               end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
> +       }
> +
> +       looker->mem_type = mtrr_state->fixed_ranges[looker->index];
> +       looker->start = end;
> +}
> +
> +static void mtrr_lookup_var_next(struct mtrr_looker *looker)
> +{
> +       struct kvm_mtrr *mtrr_state = looker->mtrr_state;
> +
> +       WARN_ON(looker->fixed);
> +
> +       looker->mem_type = -1;
> +
> +       list_for_each_entry_continue(looker->range, &mtrr_state->head, node)
> +               if (match_var_range(looker, looker->range))
> +                       return;
> +
> +       looker->partial_map |= looker->start_max < looker->end;
> +}
> +
> +static void mtrr_lookup_start(struct mtrr_looker *looker)
> +{
> +       looker->mem_type = -1;
> +
> +       if (!looker->mtrr_state->mtrr_enabled) {
> +               looker->partial_map = true;
> +               return;
> +       }
> +
> +       if (!mtrr_lookup_fixed_start(looker))
> +               mtrr_lookup_var_start(looker);
> +}
> +
> +static void mtrr_lookup_next(struct mtrr_looker *looker)
> +{
> +       WARN_ON(looker->mem_type == -1);
> +
> +       if (looker->fixed)
> +               mtrr_lookup_fixed_next(looker);
> +       else
> +               mtrr_lookup_var_next(looker);
> +}
> +
> +#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
> +       for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_), \
> +            mtrr_lookup_start(_looker_); (_looker_)->mem_type != -1;    \
> +            mtrr_lookup_next(_looker_))
> +
>  u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
>  {
>         struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
> --
> 2.1.0
>
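Spelled out, the fix David suggests makes the helper return the (exclusive)
end of range 'index' rather than its start (a sketch against the patch,
confirmed as the intent in the reply below):

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	/* the end of range 'index' is the start of range 'index + 1'. */
	return mtrr_seg->start + mtrr_seg->range_size * (index + 1);
}
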
Xiao Guangrong June 9, 2015, 2:45 a.m. UTC | #6
On 06/09/2015 08:36 AM, David Matlack wrote:
> On Sat, May 30, 2015 at 3:59 AM, Xiao Guangrong
> <guangrong.xiao@linux.intel.com> wrote:
>> It walks all MTRRs and gets all the memory cache type settings for the
>> specified range; it also checks whether the range is fully covered by MTRRs.
>>
>> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
>> ---
>>   arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 183 insertions(+)
>>
>> diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
>> index e59d138..35f86303 100644
>> --- a/arch/x86/kvm/mtrr.c
>> +++ b/arch/x86/kvm/mtrr.c
>> @@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
>>          INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
>>   }
>>
>> +struct mtrr_looker {
>> +       /* input fields. */
>> +       struct kvm_mtrr *mtrr_state;
>> +       u64 start;
>> +       u64 end;
>> +
>> +       /* output fields. */
>> +       int mem_type;
>> +       /* [start, end) is fully covered in MTRRs? */
>
> s/fully/not fully/ ?

Yup, thanks for pointing it out.

>
>> +       bool partial_map;
>> +
>> +       /* private fields. */
>> +       union {
>> +               /* used for fixed MTRRs. */
>> +               struct {
>> +                       int index;
>> +                       int seg;
>> +               };
>> +
>> +               /* used for var MTRRs. */
>> +               struct {
>> +                       struct kvm_mtrr_range *range;
>> +                       /* max address has been covered in var MTRRs. */
>> +                       u64 start_max;
>> +               };
>> +       };
>> +
>> +       bool fixed;
>> +};
>> +
>> +static void mtrr_lookup_init(struct mtrr_looker *looker,
>> +                            struct kvm_mtrr *mtrr_state, u64 start, u64 end)
>> +{
>> +       looker->mtrr_state = mtrr_state;
>> +       looker->start = start;
>> +       looker->end = end;
>> +}
>> +
>> +static u64 fixed_mtrr_range_end_addr(int seg, int index)
>> +{
>> +       struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
>> +
>> +        return mtrr_seg->start + mtrr_seg->range_size * index;
>
> Should be (index + 1)?

Good eyes, will fix.

>
>> +}
>> +
>> +static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
>> +{
>> +       int seg, index;
>> +
>> +       if (!looker->mtrr_state->fixed_mtrr_enabled)
>> +               return false;
>> +
>> +       seg = fixed_mtrr_addr_to_seg(looker->start);
>> +       if (seg < 0)
>> +               return false;
>> +
>> +       looker->fixed = true;
>> +       index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
>> +       looker->index = index;
>> +       looker->seg = seg;
>> +       looker->mem_type = looker->mtrr_state->fixed_ranges[index];
>> +       looker->start = fixed_mtrr_range_end_addr(seg, index);
>> +       return true;
>> +}
>> +
>> +static bool match_var_range(struct mtrr_looker *looker,
>> +                           struct kvm_mtrr_range *range)
>> +{
>> +       u64 start, end;
>> +
>> +       var_mtrr_range(range, &start, &end);
>> +       if (!(start >= looker->end || end <= looker->start)) {
>> +               looker->range = range;
>> +               looker->mem_type = range->base & 0xff;
>> +
>> +               /*
>> +                * the function is called when we do kvm_mtrr.head walking
>> +                * that means range has the minimum base address interleaves
>> +                * with [looker->start_max, looker->end).
>> +                */
>
> I'm having trouble understanding this comment. I think this is what you
> are trying to say:
>
>    this function is called when we do kvm_mtrr.head walking. range has the
>    minimum base address which interleaves [looker->start_max, looker->end).
>
> Let me know if I parsed it wrong.

Yes, it is; I will improve the comment.
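
A possible clearer wording for that comment, following David's parsing (a
sketch, not the v2 text):

		/*
		 * This function is called while walking the sorted
		 * kvm_mtrr.head list, so 'range' is the remaining range
		 * with the lowest base address that overlaps
		 * [looker->start_max, looker->end).
		 */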


Patch

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e59d138..35f86303 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -395,6 +395,189 @@  void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
 	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
 }
 
+struct mtrr_looker {
+	/* input fields. */
+	struct kvm_mtrr *mtrr_state;
+	u64 start;
+	u64 end;
+
+	/* output fields. */
+	int mem_type;
+	/* [start, end) is fully covered in MTRRs? */
+	bool partial_map;
+
+	/* private fields. */
+	union {
+		/* used for fixed MTRRs. */
+		struct {
+			int index;
+			int seg;
+		};
+
+		/* used for var MTRRs. */
+		struct {
+			struct kvm_mtrr_range *range;
+			/* max address has been covered in var MTRRs. */
+			u64 start_max;
+		};
+	};
+
+	bool fixed;
+};
+
+static void mtrr_lookup_init(struct mtrr_looker *looker,
+			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+	looker->mtrr_state = mtrr_state;
+	looker->start = start;
+	looker->end = end;
+}
+
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+
+	 return mtrr_seg->start + mtrr_seg->range_size * index;
+}
+
+static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
+{
+	int seg, index;
+
+	if (!looker->mtrr_state->fixed_mtrr_enabled)
+		return false;
+
+	seg = fixed_mtrr_addr_to_seg(looker->start);
+	if (seg < 0)
+		return false;
+
+	looker->fixed = true;
+	index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
+	looker->index = index;
+	looker->seg = seg;
+	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+	looker->start = fixed_mtrr_range_end_addr(seg, index);
+	return true;
+}
+
+static bool match_var_range(struct mtrr_looker *looker,
+			    struct kvm_mtrr_range *range)
+{
+	u64 start, end;
+
+	var_mtrr_range(range, &start, &end);
+	if (!(start >= looker->end || end <= looker->start)) {
+		looker->range = range;
+		looker->mem_type = range->base & 0xff;
+
+		/*
+		 * the function is called when we do kvm_mtrr.head walking
+		 * that means range has the minimum base address interleaves
+		 * with [looker->start_max, looker->end).
+		 */
+		looker->partial_map |= looker->start_max < start;
+
+		/* update the max address has been covered. */
+		looker->start_max = max(looker->start_max, end);
+		return true;
+	}
+
+	return false;
+}
+
+static void mtrr_lookup_var_start(struct mtrr_looker *looker)
+{
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+	struct kvm_mtrr_range *range;
+
+	looker->fixed = false;
+	looker->partial_map = false;
+	looker->start_max = looker->start;
+	looker->mem_type = -1;
+
+	list_for_each_entry(range, &mtrr_state->head, node)
+		if (match_var_range(looker, range))
+			return;
+
+	looker->partial_map = true;
+}
+
+static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
+{
+	struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+	u64 end;
+
+	if (looker->start >= looker->end) {
+		looker->mem_type = -1;
+		looker->partial_map = false;
+		return;
+	}
+
+	WARN_ON(!looker->fixed);
+
+	looker->index++;
+	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+
+	/* switch to next segment. */
+	if (end >= eseg->end) {
+		looker->seg++;
+		looker->index = 0;
+
+		/* have looked up for all fixed MTRRs. */
+		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
+			return mtrr_lookup_var_start(looker);
+
+		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+	}
+
+	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
+	looker->start = end;
+}
+
+static void mtrr_lookup_var_next(struct mtrr_looker *looker)
+{
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+
+	WARN_ON(looker->fixed);
+
+	looker->mem_type = -1;
+
+	list_for_each_entry_continue(looker->range, &mtrr_state->head, node)
+		if (match_var_range(looker, looker->range))
+			return;
+
+	looker->partial_map |= looker->start_max < looker->end;
+}
+
+static void mtrr_lookup_start(struct mtrr_looker *looker)
+{
+	looker->mem_type = -1;
+
+	if (!looker->mtrr_state->mtrr_enabled) {
+		looker->partial_map = true;
+		return;
+	}
+
+	if (!mtrr_lookup_fixed_start(looker))
+		mtrr_lookup_var_start(looker);
+}
+
+static void mtrr_lookup_next(struct mtrr_looker *looker)
+{
+	WARN_ON(looker->mem_type == -1);
+
+	if (looker->fixed)
+		mtrr_lookup_fixed_next(looker);
+	else
+		mtrr_lookup_var_next(looker);
+}
+
+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_), \
+	     mtrr_lookup_start(_looker_); (_looker_)->mem_type != -1;	 \
+	     mtrr_lookup_next(_looker_))
+
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
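
As a usage illustration, here is a hypothetical caller walking the guest
MTRRs for a GPA range with the v1 macro (sample_walk and its policy are
invented for illustration; a real caller such as
kvm_mtrr_get_guest_memory_type would combine the types per the SDM rules):

static int sample_walk(struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	struct mtrr_looker looker;
	int type = -1;

	/* each iteration yields the type of one MTRR range overlapping
	 * [start, end); mem_type == -1 terminates the loop. */
	mtrr_for_each_mem_type(&looker, mtrr_state, start, end) {
		if (type == -1)
			type = looker.mem_type;
		/* a real caller would merge 'type' with looker.mem_type
		 * according to the MTRR precedence rules. */
	}

	/* [start, end) was not fully covered by MTRRs. */
	if (looker.partial_map)
		return -1;

	return type;
}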