[v1,5/8] target/riscv: Implementation of enhanced PMP (ePMP)

Message ID 3d2dbabe5d5e25c1c88b9fda0bbdd5f154b2993e.1617367533.git.alistair.francis@wdc.com (mailing list archive)
State New, archived
Series RISC-V: Add support for ePMP v0.9.1

Commit Message

Alistair Francis April 2, 2021, 12:48 p.m. UTC
From: Hou Weiying <weiying_hou@outlook.com>

This commit adds support for ePMP v0.9.1.

The ePMP spec can be found in:
https://docs.google.com/document/d/1Mh_aiHYxemL0umN3GTTw8vsbmzHZ_nxZXgjgOUzbvc8

Signed-off-by: Hongzheng-Li <Ethan.Lee.QNL@gmail.com>
Signed-off-by: Hou Weiying <weiying_hou@outlook.com>
Signed-off-by: Myriad-Dreamin <camiyoru@gmail.com>
Message-Id: <SG2PR02MB263462CCDBCBBAD36983C2CD93450@SG2PR02MB2634.apcprd02.prod.outlook.com>
[ Changes by AF:
 - Rebase on master
 - Update to latest spec
 - Use a switch case to handle ePMP MML permissions
 - Fix a few bugs
]
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/pmp.c | 165 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 153 insertions(+), 12 deletions(-)
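
For reference while reading the diff: the patch keys its behaviour off three
new mseccfg control bits. A minimal sketch of the corresponding checks,
assuming the bit layout from the ePMP v0.9.1 draft (MML = bit 0, MMWP = bit 1,
RLB = bit 2) and illustrative macro/field names (the real definitions are
introduced earlier in this series):

    /* Illustrative only -- the actual macros and mseccfg field come from an
     * earlier patch in this series. */
    #define MSECCFG_MML   (1U << 0)   /* Machine Mode Lockdown */
    #define MSECCFG_MMWP  (1U << 1)   /* Machine Mode Whitelist Policy */
    #define MSECCFG_RLB   (1U << 2)   /* Rule Locking Bypass */

    #define MSECCFG_MML_ISSET(env)  (!!((env)->mseccfg & MSECCFG_MML))
    #define MSECCFG_MMWP_ISSET(env) (!!((env)->mseccfg & MSECCFG_MMWP))
    #define MSECCFG_RLB_ISSET(env)  (!!((env)->mseccfg & MSECCFG_RLB))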

Comments

Bin Meng April 9, 2021, 4:24 a.m. UTC | #1
On Fri, Apr 2, 2021 at 8:50 PM Alistair Francis
<alistair.francis@wdc.com> wrote:
>
> From: Hou Weiying <weiying_hou@outlook.com>
>
> This commit adds support for ePMP v0.9.1.
>
> The ePMP spec can be found in:
> https://docs.google.com/document/d/1Mh_aiHYxemL0umN3GTTw8vsbmzHZ_nxZXgjgOUzbvc8
>
> Signed-off-by: Hongzheng-Li <Ethan.Lee.QNL@gmail.com>
> Signed-off-by: Hou Weiying <weiying_hou@outlook.com>
> Signed-off-by: Myriad-Dreamin <camiyoru@gmail.com>
> Message-Id: <SG2PR02MB263462CCDBCBBAD36983C2CD93450@SG2PR02MB2634.apcprd02.prod.outlook.com>
> [ Changes by AF:
>  - Rebase on master
>  - Update to latest spec
>  - Use a switch case to handle ePMP MML permissions
>  - Fix a few bugs
> ]
> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
> ---
>  target/riscv/pmp.c | 165 +++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 153 insertions(+), 12 deletions(-)
>
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index 1d071b044b..3794c808e8 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -90,11 +90,42 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
>  static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
>  {
>      if (pmp_index < MAX_RISCV_PMPS) {
> -        if (!pmp_is_locked(env, pmp_index)) {
> -            env->pmp_state.pmp[pmp_index].cfg_reg = val;
> -            pmp_update_rule(env, pmp_index);
> +        bool locked = true;
> +
> +        if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
> +            /* mseccfg.RLB is set */
> +            if (MSECCFG_RLB_ISSET(env)) {
> +                locked = false;
> +            }
> +
> +            /* mseccfg.MML is not set */
> +            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
> +                locked = false;
> +            }
> +
> +            /* mseccfg.MML is set */
> +            if (MSECCFG_MML_ISSET(env)) {
> +                /* not adding execute bit */
> +                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
> +                    locked = false;
> +                }
> +                 /* shared region and not adding X bit*/

nits: the /* is not aligned, and a space is needed before the closing */

> +                if ((val & PMP_LOCK) != PMP_LOCK &&
> +                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
> +                    locked = false;
> +                }
> +            }
>          } else {
> +            if (!pmp_is_locked(env, pmp_index)) {
> +                locked = false;
> +            }
> +        }
> +
> +        if (locked) {
>              qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
> +        } else {
> +            env->pmp_state.pmp[pmp_index].cfg_reg = val;
> +            pmp_update_rule(env, pmp_index);
>          }
>      } else {
>          qemu_log_mask(LOG_GUEST_ERROR,
> @@ -217,6 +248,33 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
>  {
>      bool ret;
>
> +    if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
> +        if (MSECCFG_MMWP_ISSET(env)) {
> +            /*
> +             * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
> +             * so we default to deny all, even for M mode.

nits: M-mode

> +             */
> +            *allowed_privs = 0;
> +            return false;
> +        } else if (MSECCFG_MML_ISSET(env)) {
> +            /*
> +             * The Machine Mode Lockdown (mseccfg.MML) bit is set
> +             * so we can only execute code in M mode with an applicable

nits: M-mode

> +             * rule.
> +             * Other modes are disabled.

nits: this line can be put on the same line as "rule."

> +             */
> +            if (mode == PRV_M && !(privs & PMP_EXEC)) {
> +                ret = true;
> +                *allowed_privs = PMP_READ | PMP_WRITE;
> +            } else {
> +                ret = false;
> +                *allowed_privs = 0;
> +            }
> +
> +            return ret;
> +        }

If I understand the spec correctly, I think we are missing a branch to
handle the MML-unset case, in which RWX is allowed in M-mode.

> +    }
> +
>      if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
>          /*
>           * Privileged spec v1.10 states if HW doesn't implement any PMP entry
> @@ -294,13 +352,94 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
>              pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
>
>          /*
> -         * If the PMP entry is not off and the address is in range, do the priv
> -         * check
> +         * Convert the PMP permissions to match the truth table in the
> +         * ePMP spec.
>           */
> +        const uint8_t epmp_operation =
> +            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
> +            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
> +            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
> +            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
> +
>          if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
> -            *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
> -            if ((mode != PRV_M) || pmp_is_locked(env, i)) {
> -                *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
> +            /*
> +             * If the PMP entry is not off and the address is in range,
> +             * do the priv check
> +             */
> +            if (!MSECCFG_MML_ISSET(env)) {
> +                /*
> +                 * If mseccfg.MML Bit is not set, do pmp priv check
> +                 * This will always apply to regular PMP.
> +                 */
> +                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
> +                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
> +                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
> +                }
> +            } else {
> +                /*
> +                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
> +                 */
> +                if (mode == PRV_M) {
> +                    switch (epmp_operation) {
> +                    case 0:
> +                    case 1:
> +                    case 4:
> +                    case 5:
> +                    case 6:
> +                    case 7:
> +                    case 8:
> +                        *allowed_privs = 0;
> +                        break;
> +                    case 2:
> +                    case 3:
> +                    case 14:
> +                        *allowed_privs = PMP_READ | PMP_WRITE;
> +                        break;
> +                    case 9:
> +                    case 10:
> +                        *allowed_privs = PMP_EXEC;
> +                        break;
> +                    case 11:
> +                    case 13:
> +                        *allowed_privs = PMP_READ | PMP_EXEC;
> +                        break;
> +                    case 12:
> +                    case 15:
> +                        *allowed_privs = PMP_READ;
> +                        break;
> +                    }
> +                } else {
> +                    switch (epmp_operation) {
> +                    case 0:
> +                    case 8:
> +                    case 9:
> +                    case 12:
> +                    case 13:
> +                    case 14:
> +                        *allowed_privs = 0;
> +                        break;
> +                    case 1:
> +                    case 10:
> +                    case 11:
> +                        *allowed_privs = PMP_EXEC;
> +                        break;
> +                    case 2:
> +                    case 4:
> +                    case 15:
> +                        *allowed_privs = PMP_READ;
> +                        break;
> +                    case 3:
> +                    case 6:
> +                        *allowed_privs = PMP_READ | PMP_WRITE;
> +                        break;
> +                    case 5:
> +                        *allowed_privs = PMP_READ | PMP_EXEC;
> +                        break;
> +                    case 7:
> +                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
> +                        break;
> +                    }
> +                }
>              }
>
>              ret = ((privs & *allowed_privs) == privs);
> @@ -328,10 +467,12 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
>
>      trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
>
> -    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
> -        qemu_log_mask(LOG_GUEST_ERROR,
> -                      "ignoring pmpcfg write - incorrect address\n");
> -        return;
> +    if (!riscv_feature(env, RISCV_FEATURE_EPMP) || !MSECCFG_RLB_ISSET(env)) {
> +        if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                          "ignoring pmpcfg write - incorrect address\n");

If ePMP RLB is off, this log message is inaccurate and misleading.

> +            return;
> +        }
>      }
>
>      for (i = 0; i < sizeof(target_ulong); i++) {
> --

Regards,
Bin
Alistair Francis April 11, 2021, 11:03 p.m. UTC | #2
On Fri, Apr 9, 2021 at 2:24 PM Bin Meng <bmeng.cn@gmail.com> wrote:
>
> On Fri, Apr 2, 2021 at 8:50 PM Alistair Francis
> <alistair.francis@wdc.com> wrote:
> >
> > [...]
> > +            if (mode == PRV_M && !(privs & PMP_EXEC)) {
> > +                ret = true;
> > +                *allowed_privs = PMP_READ | PMP_WRITE;
> > +            } else {
> > +                ret = false;
> > +                *allowed_privs = 0;
> > +            }
> > +
> > +            return ret;
> > +        }
>
> If I understand the spec correctly, I think we are missing a branch to
> handle the MML-unset case, in which RWX is allowed in M-mode.

Yep, if MML and MMWP aren't set then we just fall back to the standard
PMP checks below, so M-mode accesses will be allowed by default and
accesses from the other privilege modes won't be.
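
(For readers following along: the standard fall-back referred to here is the
pre-existing default check in pmp_hart_has_privs_default(), visible at the
end of the same hunk in the patch below. A paraphrased sketch, not part of
this patch:)

    /* Roughly what the existing default path does when no PMP rule matches
     * and neither mseccfg.MML nor mseccfg.MMWP is set. */
    if (!riscv_feature(env, RISCV_FEATURE_PMP) || (mode == PRV_M)) {
        /* No PMP implemented, or an M-mode access: allow everything */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /* Any other mode with no matching rule is denied */
        ret = false;
        *allowed_privs = 0;
    }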

> > [...]
> > @@ -328,10 +467,12 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
> >
> >      trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
> >
> > -    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
> > -        qemu_log_mask(LOG_GUEST_ERROR,
> > -                      "ignoring pmpcfg write - incorrect address\n");
> > -        return;
> > +    if (!riscv_feature(env, RISCV_FEATURE_EPMP) || !MSECCFG_RLB_ISSET(env)) {
> > +        if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
> > +            qemu_log_mask(LOG_GUEST_ERROR,
> > +                          "ignoring pmpcfg write - incorrect address\n");
>
> If ePMP RLB is off, this log message is inaccurate and misleading.

Thanks, I have fixed this and all the other comments.

Alistair


Patch

diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 1d071b044b..3794c808e8 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -90,11 +90,42 @@  static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
 static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
 {
     if (pmp_index < MAX_RISCV_PMPS) {
-        if (!pmp_is_locked(env, pmp_index)) {
-            env->pmp_state.pmp[pmp_index].cfg_reg = val;
-            pmp_update_rule(env, pmp_index);
+        bool locked = true;
+
+        if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+            /* mseccfg.RLB is set */
+            if (MSECCFG_RLB_ISSET(env)) {
+                locked = false;
+            }
+
+            /* mseccfg.MML is not set */
+            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
+                locked = false;
+            }
+
+            /* mseccfg.MML is set */
+            if (MSECCFG_MML_ISSET(env)) {
+                /* not adding execute bit */
+                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
+                    locked = false;
+                }
+                 /* shared region and not adding X bit*/
+                if ((val & PMP_LOCK) != PMP_LOCK &&
+                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
+                    locked = false;
+                }
+            }
         } else {
+            if (!pmp_is_locked(env, pmp_index)) {
+                locked = false;
+            }
+        }
+
+        if (locked) {
             qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
+        } else {
+            env->pmp_state.pmp[pmp_index].cfg_reg = val;
+            pmp_update_rule(env, pmp_index);
         }
     } else {
         qemu_log_mask(LOG_GUEST_ERROR,
@@ -217,6 +248,33 @@  static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
 {
     bool ret;
 
+    if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+        if (MSECCFG_MMWP_ISSET(env)) {
+            /*
+             * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
+             * so we default to deny all, even for M mode.
+             */
+            *allowed_privs = 0;
+            return false;
+        } else if (MSECCFG_MML_ISSET(env)) {
+            /*
+             * The Machine Mode Lockdown (mseccfg.MML) bit is set
+             * so we can only execute code in M mode with an applicable
+             * rule.
+             * Other modes are disabled.
+             */
+            if (mode == PRV_M && !(privs & PMP_EXEC)) {
+                ret = true;
+                *allowed_privs = PMP_READ | PMP_WRITE;
+            } else {
+                ret = false;
+                *allowed_privs = 0;
+            }
+
+            return ret;
+        }
+    }
+
     if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
         /*
          * Privileged spec v1.10 states if HW doesn't implement any PMP entry
@@ -294,13 +352,94 @@  bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
             pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
 
         /*
-         * If the PMP entry is not off and the address is in range, do the priv
-         * check
+         * Convert the PMP permissions to match the truth table in the
+         * ePMP spec.
          */
+        const uint8_t epmp_operation =
+            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
+            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
+            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
+            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
+
         if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
-            *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
-            if ((mode != PRV_M) || pmp_is_locked(env, i)) {
-                *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+            /*
+             * If the PMP entry is not off and the address is in range,
+             * do the priv check
+             */
+            if (!MSECCFG_MML_ISSET(env)) {
+                /*
+                 * If mseccfg.MML Bit is not set, do pmp priv check
+                 * This will always apply to regular PMP.
+                 */
+                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
+                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+                }
+            } else {
+                /*
+                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
+                 */
+                if (mode == PRV_M) {
+                    switch (epmp_operation) {
+                    case 0:
+                    case 1:
+                    case 4:
+                    case 5:
+                    case 6:
+                    case 7:
+                    case 8:
+                        *allowed_privs = 0;
+                        break;
+                    case 2:
+                    case 3:
+                    case 14:
+                        *allowed_privs = PMP_READ | PMP_WRITE;
+                        break;
+                    case 9:
+                    case 10:
+                        *allowed_privs = PMP_EXEC;
+                        break;
+                    case 11:
+                    case 13:
+                        *allowed_privs = PMP_READ | PMP_EXEC;
+                        break;
+                    case 12:
+                    case 15:
+                        *allowed_privs = PMP_READ;
+                        break;
+                    }
+                } else {
+                    switch (epmp_operation) {
+                    case 0:
+                    case 8:
+                    case 9:
+                    case 12:
+                    case 13:
+                    case 14:
+                        *allowed_privs = 0;
+                        break;
+                    case 1:
+                    case 10:
+                    case 11:
+                        *allowed_privs = PMP_EXEC;
+                        break;
+                    case 2:
+                    case 4:
+                    case 15:
+                        *allowed_privs = PMP_READ;
+                        break;
+                    case 3:
+                    case 6:
+                        *allowed_privs = PMP_READ | PMP_WRITE;
+                        break;
+                    case 5:
+                        *allowed_privs = PMP_READ | PMP_EXEC;
+                        break;
+                    case 7:
+                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+                        break;
+                    }
+                }
             }
 
             ret = ((privs & *allowed_privs) == privs);
@@ -328,10 +467,12 @@  void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
 
     trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
 
-    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "ignoring pmpcfg write - incorrect address\n");
-        return;
+    if (!riscv_feature(env, RISCV_FEATURE_EPMP) || !MSECCFG_RLB_ISSET(env)) {
+        if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "ignoring pmpcfg write - incorrect address\n");
+            return;
+        }
     }
 
     for (i = 0; i < sizeof(target_ulong); i++) {
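
As a reading aid for the truth-table switches in pmp_hart_has_privs() above:
epmp_operation packs an entry's L, R, W and X bits into a 4-bit index
(L = bit 3, R = bit 2, W = bit 1, X = bit 0), matching the row numbering of
the truth table in the ePMP spec. A small standalone sketch, assuming QEMU's
usual PMP bit definitions (R = 1 << 0, W = 1 << 1, X = 1 << 2, L = 1 << 7):

    /* Standalone sketch of the epmp_operation index computation. */
    #include <assert.h>
    #include <stdint.h>

    #define PMP_READ   (1 << 0)
    #define PMP_WRITE  (1 << 1)
    #define PMP_EXEC   (1 << 2)
    #define PMP_LOCK   (1 << 7)

    static uint8_t epmp_operation(uint8_t cfg)
    {
        /* L -> bit 3, R -> bit 2, W -> bit 1, X -> bit 0 */
        return ((cfg & PMP_LOCK) >> 4) |
               ((cfg & PMP_READ) << 2) |
                (cfg & PMP_WRITE)      |
               ((cfg & PMP_EXEC) >> 2);
    }

    int main(void)
    {
        /* L=1, R=1, W=1, X=0 -> index 14: R/W in M-mode, no access in S/U */
        assert(epmp_operation(PMP_LOCK | PMP_READ | PMP_WRITE) == 14);
        /* L=0, W=1, X=1 -> index 3: the shared-region encoding, R/W in all modes */
        assert(epmp_operation(PMP_WRITE | PMP_EXEC) == 3);
        return 0;
    }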