diff mbox series

[v6,4/4] target/riscv: Change the TLB page size depends on PMP entries.

Message ID 6b0bf48662ef26ab4c15381a08e78a74ebd7ca79.1595924470.git.zong.li@sifive.com (mailing list archive)
State New, archived
Headers show
Series Fix some PMP implementations | expand

Commit Message

Zong Li July 28, 2020, 8:26 a.m. UTC
The minimum granularity of PMP is 4 bytes, which is smaller than the 4KB
page size; therefore, the PMP check would be ignored if its range doesn't
start at a page-aligned address. This patch detects the PMP entries
and sets a smaller page size for the TLB if there is a PMP entry which
covers the page.

Signed-off-by: Zong Li <zong.li@sifive.com>
---
 target/riscv/cpu_helper.c | 10 ++++++--
 target/riscv/pmp.c        | 52 +++++++++++++++++++++++++++++++++++++++
 target/riscv/pmp.h        |  2 ++
 3 files changed, 62 insertions(+), 2 deletions(-)

Comments

Zong Li Aug. 5, 2020, 2:43 a.m. UTC | #1
On Tue, Jul 28, 2020 at 4:26 PM Zong Li <zong.li@sifive.com> wrote:
>
> The minimum granularity of PMP is 4 bytes, it is small than 4KB page
> size, therefore, the pmp checking would be ignored if its range doesn't
> start from the alignment of one page. This patch detects the pmp entries
> and sets the small page size to TLB if there is a PMP entry which cover
> the page size.
>
> Signed-off-by: Zong Li <zong.li@sifive.com>
> ---
>  target/riscv/cpu_helper.c | 10 ++++++--
>  target/riscv/pmp.c        | 52 +++++++++++++++++++++++++++++++++++++++
>  target/riscv/pmp.h        |  2 ++
>  3 files changed, 62 insertions(+), 2 deletions(-)
>
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 2f337e418c..fd1d373b6f 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -693,6 +693,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>      bool first_stage_error = true;
>      int ret = TRANSLATE_FAIL;
>      int mode = mmu_idx;
> +    target_ulong tlb_size = 0;
>
>      env->guest_phys_fault_addr = 0;
>
> @@ -784,8 +785,13 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>      }
>
>      if (ret == TRANSLATE_SUCCESS) {
> -        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> -                     prot, mmu_idx, TARGET_PAGE_SIZE);
> +        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
> +            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
> +                         prot, mmu_idx, tlb_size);
> +        } else {
> +            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> +                         prot, mmu_idx, TARGET_PAGE_SIZE);
> +        }
>          return true;
>      } else if (probe) {
>          return false;
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index aeba796484..adadf6e9ba 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -393,3 +393,55 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
>
>      return val;
>  }
> +
> +/*
> + * Calculate the TLB size if the start address or the end address of
> + * PMP entry is presented in thie TLB page.
> + */
> +static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
> +    target_ulong tlb_sa, target_ulong tlb_ea)
> +{
> +    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
> +    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
> +
> +    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
> +        return pmp_ea - pmp_sa + 1;
> +    }
> +
> +    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
> +        return tlb_ea - pmp_sa + 1;
> +    }
> +
> +    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
> +        return pmp_ea - tlb_sa + 1;
> +    }
> +
> +    return 0;
> +}
> +
> +/*
> + * Check is there a PMP entry whcih range covers this page. If so,
> + * try to find the minimum granularity for the TLB size.
> + */
> +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> +    target_ulong *tlb_size)
> +{
> +    int i;
> +    target_ulong val;
> +    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
> +
> +    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
> +        if (val) {
> +            if (*tlb_size == 0 || *tlb_size > val) {
> +                *tlb_size = val;
> +            }
> +        }
> +    }
> +
> +    if (*tlb_size != 0) {
> +        return true;
> +    }
> +
> +    return false;
> +}
> diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
> index 8e19793132..c70f2ea4c4 100644
> --- a/target/riscv/pmp.h
> +++ b/target/riscv/pmp.h
> @@ -60,5 +60,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>  target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
>  bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
>      target_ulong size, pmp_priv_t priv, target_ulong mode);
> +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> +    target_ulong *tlb_size);
>
>  #endif
> --
> 2.27.0
>

ping
Alistair Francis Aug. 12, 2020, 3:11 p.m. UTC | #2
On Tue, Jul 28, 2020 at 1:29 AM Zong Li <zong.li@sifive.com> wrote:
>
> The minimum granularity of PMP is 4 bytes, it is small than 4KB page
> size, therefore, the pmp checking would be ignored if its range doesn't
> start from the alignment of one page. This patch detects the pmp entries
> and sets the small page size to TLB if there is a PMP entry which cover
> the page size.
>
> Signed-off-by: Zong Li <zong.li@sifive.com>
> ---
>  target/riscv/cpu_helper.c | 10 ++++++--
>  target/riscv/pmp.c        | 52 +++++++++++++++++++++++++++++++++++++++
>  target/riscv/pmp.h        |  2 ++
>  3 files changed, 62 insertions(+), 2 deletions(-)
>
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 2f337e418c..fd1d373b6f 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -693,6 +693,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>      bool first_stage_error = true;
>      int ret = TRANSLATE_FAIL;
>      int mode = mmu_idx;
> +    target_ulong tlb_size = 0;
>
>      env->guest_phys_fault_addr = 0;
>
> @@ -784,8 +785,13 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>      }
>
>      if (ret == TRANSLATE_SUCCESS) {
> -        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> -                     prot, mmu_idx, TARGET_PAGE_SIZE);
> +        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
> +            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
> +                         prot, mmu_idx, tlb_size);
> +        } else {
> +            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> +                         prot, mmu_idx, TARGET_PAGE_SIZE);
> +        }
>          return true;
>      } else if (probe) {
>          return false;
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index aeba796484..adadf6e9ba 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -393,3 +393,55 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
>
>      return val;
>  }
> +
> +/*
> + * Calculate the TLB size if the start address or the end address of
> + * PMP entry is presented in thie TLB page.
> + */
> +static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
> +    target_ulong tlb_sa, target_ulong tlb_ea)
> +{
> +    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
> +    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
> +
> +    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
> +        return pmp_ea - pmp_sa + 1;
> +    }
> +
> +    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
> +        return tlb_ea - pmp_sa + 1;
> +    }
> +
> +    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
> +        return pmp_ea - tlb_sa + 1;
> +    }
> +
> +    return 0;
> +}
> +
> +/*
> + * Check is there a PMP entry whcih range covers this page. If so,

s/whcih/which/g

I fixed this when apply it.

> + * try to find the minimum granularity for the TLB size.
> + */
> +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> +    target_ulong *tlb_size)
> +{
> +    int i;
> +    target_ulong val;
> +    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
> +
> +    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
> +        if (val) {
> +            if (*tlb_size == 0 || *tlb_size > val) {
> +                *tlb_size = val;
> +            }
> +        }
> +    }
> +
> +    if (*tlb_size != 0) {
> +        return true;
> +    }
> +
> +    return false;
> +}
> diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
> index 8e19793132..c70f2ea4c4 100644
> --- a/target/riscv/pmp.h
> +++ b/target/riscv/pmp.h
> @@ -60,5 +60,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>  target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
>  bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
>      target_ulong size, pmp_priv_t priv, target_ulong mode);
> +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> +    target_ulong *tlb_size);

The indentation is wrong here (as it is in the rest of the file). I
just fixed this up as well as the others when I applied it.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

I have applied patch 3 and 4 of this series. Patch 1 has already been
applied and patch 2 no longer applies due to a different fix, sorry
about that.

Alistair

>
>  #endif
> --
> 2.27.0
>
>
Zong Li Aug. 13, 2020, 3:04 a.m. UTC | #3
On Wed, Aug 12, 2020 at 11:21 PM Alistair Francis <alistair23@gmail.com> wrote:
>
> On Tue, Jul 28, 2020 at 1:29 AM Zong Li <zong.li@sifive.com> wrote:
> >
> > The minimum granularity of PMP is 4 bytes, it is small than 4KB page
> > size, therefore, the pmp checking would be ignored if its range doesn't
> > start from the alignment of one page. This patch detects the pmp entries
> > and sets the small page size to TLB if there is a PMP entry which cover
> > the page size.
> >
> > Signed-off-by: Zong Li <zong.li@sifive.com>
> > ---
> >  target/riscv/cpu_helper.c | 10 ++++++--
> >  target/riscv/pmp.c        | 52 +++++++++++++++++++++++++++++++++++++++
> >  target/riscv/pmp.h        |  2 ++
> >  3 files changed, 62 insertions(+), 2 deletions(-)
> >
> > diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> > index 2f337e418c..fd1d373b6f 100644
> > --- a/target/riscv/cpu_helper.c
> > +++ b/target/riscv/cpu_helper.c
> > @@ -693,6 +693,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> >      bool first_stage_error = true;
> >      int ret = TRANSLATE_FAIL;
> >      int mode = mmu_idx;
> > +    target_ulong tlb_size = 0;
> >
> >      env->guest_phys_fault_addr = 0;
> >
> > @@ -784,8 +785,13 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> >      }
> >
> >      if (ret == TRANSLATE_SUCCESS) {
> > -        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> > -                     prot, mmu_idx, TARGET_PAGE_SIZE);
> > +        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
> > +            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
> > +                         prot, mmu_idx, tlb_size);
> > +        } else {
> > +            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> > +                         prot, mmu_idx, TARGET_PAGE_SIZE);
> > +        }
> >          return true;
> >      } else if (probe) {
> >          return false;
> > diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> > index aeba796484..adadf6e9ba 100644
> > --- a/target/riscv/pmp.c
> > +++ b/target/riscv/pmp.c
> > @@ -393,3 +393,55 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
> >
> >      return val;
> >  }
> > +
> > +/*
> > + * Calculate the TLB size if the start address or the end address of
> > + * PMP entry is presented in thie TLB page.
> > + */
> > +static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
> > +    target_ulong tlb_sa, target_ulong tlb_ea)
> > +{
> > +    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
> > +    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
> > +
> > +    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
> > +        return pmp_ea - pmp_sa + 1;
> > +    }
> > +
> > +    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
> > +        return tlb_ea - pmp_sa + 1;
> > +    }
> > +
> > +    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
> > +        return pmp_ea - tlb_sa + 1;
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> > +/*
> > + * Check is there a PMP entry whcih range covers this page. If so,
>
> s/whcih/which/g
>
> I fixed this when apply it.
>
> > + * try to find the minimum granularity for the TLB size.
> > + */
> > +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> > +    target_ulong *tlb_size)
> > +{
> > +    int i;
> > +    target_ulong val;
> > +    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
> > +
> > +    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> > +        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
> > +        if (val) {
> > +            if (*tlb_size == 0 || *tlb_size > val) {
> > +                *tlb_size = val;
> > +            }
> > +        }
> > +    }
> > +
> > +    if (*tlb_size != 0) {
> > +        return true;
> > +    }
> > +
> > +    return false;
> > +}
> > diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
> > index 8e19793132..c70f2ea4c4 100644
> > --- a/target/riscv/pmp.h
> > +++ b/target/riscv/pmp.h
> > @@ -60,5 +60,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> >  target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
> >  bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
> >      target_ulong size, pmp_priv_t priv, target_ulong mode);
> > +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> > +    target_ulong *tlb_size);
>
> The indentation is wrong here (as it is in the rest of the file). I
> just fixed this up as well as the others when I applied it.
>
> Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
>
> I have applied patch 3 and 4 of this series. Patch 1 has already been
> applied and patch 2 no longer applies due to a different fix, sorry
> about that.
>

Thanks for your reviewing and applying. I don't follow with you about
patch 2, could you please also forward the information or the fix to
me?

> Alistair
>
> >
> >  #endif
> > --
> > 2.27.0
> >
> >
Alistair Francis Aug. 13, 2020, 3:02 p.m. UTC | #4
On Wed, Aug 12, 2020 at 8:04 PM Zong Li <zong.li@sifive.com> wrote:
>
> On Wed, Aug 12, 2020 at 11:21 PM Alistair Francis <alistair23@gmail.com> wrote:
> >
> > On Tue, Jul 28, 2020 at 1:29 AM Zong Li <zong.li@sifive.com> wrote:
> > >
> > > The minimum granularity of PMP is 4 bytes, it is small than 4KB page
> > > size, therefore, the pmp checking would be ignored if its range doesn't
> > > start from the alignment of one page. This patch detects the pmp entries
> > > and sets the small page size to TLB if there is a PMP entry which cover
> > > the page size.
> > >
> > > Signed-off-by: Zong Li <zong.li@sifive.com>
> > > ---
> > >  target/riscv/cpu_helper.c | 10 ++++++--
> > >  target/riscv/pmp.c        | 52 +++++++++++++++++++++++++++++++++++++++
> > >  target/riscv/pmp.h        |  2 ++
> > >  3 files changed, 62 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> > > index 2f337e418c..fd1d373b6f 100644
> > > --- a/target/riscv/cpu_helper.c
> > > +++ b/target/riscv/cpu_helper.c
> > > @@ -693,6 +693,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> > >      bool first_stage_error = true;
> > >      int ret = TRANSLATE_FAIL;
> > >      int mode = mmu_idx;
> > > +    target_ulong tlb_size = 0;
> > >
> > >      env->guest_phys_fault_addr = 0;
> > >
> > > @@ -784,8 +785,13 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> > >      }
> > >
> > >      if (ret == TRANSLATE_SUCCESS) {
> > > -        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> > > -                     prot, mmu_idx, TARGET_PAGE_SIZE);
> > > +        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
> > > +            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
> > > +                         prot, mmu_idx, tlb_size);
> > > +        } else {
> > > +            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
> > > +                         prot, mmu_idx, TARGET_PAGE_SIZE);
> > > +        }
> > >          return true;
> > >      } else if (probe) {
> > >          return false;
> > > diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> > > index aeba796484..adadf6e9ba 100644
> > > --- a/target/riscv/pmp.c
> > > +++ b/target/riscv/pmp.c
> > > @@ -393,3 +393,55 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
> > >
> > >      return val;
> > >  }
> > > +
> > > +/*
> > > + * Calculate the TLB size if the start address or the end address of
> > > + * PMP entry is presented in thie TLB page.
> > > + */
> > > +static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
> > > +    target_ulong tlb_sa, target_ulong tlb_ea)
> > > +{
> > > +    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
> > > +    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
> > > +
> > > +    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
> > > +        return pmp_ea - pmp_sa + 1;
> > > +    }
> > > +
> > > +    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
> > > +        return tlb_ea - pmp_sa + 1;
> > > +    }
> > > +
> > > +    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
> > > +        return pmp_ea - tlb_sa + 1;
> > > +    }
> > > +
> > > +    return 0;
> > > +}
> > > +
> > > +/*
> > > + * Check is there a PMP entry whcih range covers this page. If so,
> >
> > s/whcih/which/g
> >
> > I fixed this when apply it.
> >
> > > + * try to find the minimum granularity for the TLB size.
> > > + */
> > > +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> > > +    target_ulong *tlb_size)
> > > +{
> > > +    int i;
> > > +    target_ulong val;
> > > +    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
> > > +
> > > +    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> > > +        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
> > > +        if (val) {
> > > +            if (*tlb_size == 0 || *tlb_size > val) {
> > > +                *tlb_size = val;
> > > +            }
> > > +        }
> > > +    }
> > > +
> > > +    if (*tlb_size != 0) {
> > > +        return true;
> > > +    }
> > > +
> > > +    return false;
> > > +}
> > > diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
> > > index 8e19793132..c70f2ea4c4 100644
> > > --- a/target/riscv/pmp.h
> > > +++ b/target/riscv/pmp.h
> > > @@ -60,5 +60,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> > >  target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
> > >  bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
> > >      target_ulong size, pmp_priv_t priv, target_ulong mode);
> > > +bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
> > > +    target_ulong *tlb_size);
> >
> > The indentation is wrong here (as it is in the rest of the file). I
> > just fixed this up as well as the others when I applied it.
> >
> > Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
> >
> > I have applied patch 3 and 4 of this series. Patch 1 has already been
> > applied and patch 2 no longer applies due to a different fix, sorry
> > about that.
> >
>
> Thanks for your reviewing and applying. I don't follow with you about
> patch 2, could you please also forward the information or the fix to
> me?

This patch fixes the same problem and I find it a little simpler:
https://patchew.org/QEMU/20200812223045.96803-1-alistair.francis@wdc.com/20200812223045.96803-10-alistair.francis@wdc.com/

Alistair

>
> > Alistair
> >
> > >
> > >  #endif
> > > --
> > > 2.27.0
> > >
> > >
diff mbox series

Patch

diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 2f337e418c..fd1d373b6f 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -693,6 +693,7 @@  bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     bool first_stage_error = true;
     int ret = TRANSLATE_FAIL;
     int mode = mmu_idx;
+    target_ulong tlb_size = 0;
 
     env->guest_phys_fault_addr = 0;
 
@@ -784,8 +785,13 @@  bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     }
 
     if (ret == TRANSLATE_SUCCESS) {
-        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
-                     prot, mmu_idx, TARGET_PAGE_SIZE);
+        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
+            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
+                         prot, mmu_idx, tlb_size);
+        } else {
+            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
+                         prot, mmu_idx, TARGET_PAGE_SIZE);
+        }
         return true;
     } else if (probe) {
         return false;
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index aeba796484..adadf6e9ba 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -393,3 +393,55 @@  target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
 
     return val;
 }
+
+/*
+ * Calculate the TLB size if the start address or the end address of
+ * PMP entry is present in this TLB page.
+ */
+static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
+    target_ulong tlb_sa, target_ulong tlb_ea)
+{
+    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
+    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
+
+    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
+        return pmp_ea - pmp_sa + 1;
+    }
+
+    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
+        return tlb_ea - pmp_sa + 1;
+    }
+
+    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
+        return pmp_ea - tlb_sa + 1;
+    }
+
+    return 0;
+}
+
+/*
+ * Check if there is a PMP entry whose range covers this page. If so,
+ * try to find the minimum granularity for the TLB size.
+ */
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+    target_ulong *tlb_size)
+{
+    int i;
+    target_ulong val;
+    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
+
+    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
+        if (val) {
+            if (*tlb_size == 0 || *tlb_size > val) {
+                *tlb_size = val;
+            }
+        }
+    }
+
+    if (*tlb_size != 0) {
+        return true;
+    }
+
+    return false;
+}
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index 8e19793132..c70f2ea4c4 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -60,5 +60,7 @@  void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
 bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
     target_ulong size, pmp_priv_t priv, target_ulong mode);
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+    target_ulong *tlb_size);
 
 #endif