diff mbox series

[v3,14/20] target/riscv: mmu changes for zicfiss shadow stack protection

Message ID 20240807000652.1417776-15-debug@rivosinc.com (mailing list archive)
State New, archived
Headers show
Series riscv support for control flow integrity extensions | expand

Commit Message

Deepak Gupta Aug. 7, 2024, 12:06 a.m. UTC
zicfiss protects the shadow stack using a new page table encoding:
PTE.R=0, PTE.W=1, PTE.X=0. This encoding is reserved if zicfiss is not
implemented or if the shadow stack is not enabled.
Regular loads from shadow stack memory are allowed, while regular stores
to shadow stack memory lead to access faults. Shadow stack accesses to
read-only memory lead to a store page fault.

To implement the special nature of shadow stack memory, where only
selected stores (shadow stack stores from sspush) are allowed while all
other regular stores are disallowed, a new MMU TLB index is created for
shadow stack accesses.

Signed-off-by: Deepak Gupta <debug@rivosinc.com>
---
 target/riscv/cpu_helper.c | 61 +++++++++++++++++++++++++++++++++++++--
 target/riscv/internals.h  |  3 ++
 2 files changed, 62 insertions(+), 2 deletions(-)
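
For context, a rough sketch of how a shadow stack store might target the new
TLB index at translation time (illustrative only: the real sspush/sspopchk
translation lives elsewhere in this series, and the mem_idx composition and
operand size below are assumptions):

    /* Illustrative sketch, not part of this patch: route a shadow stack
     * store through the dedicated SS TLB index, so that only this kind
     * of store is seen as a legal store on SS pages. */
    static void gen_ss_store_sketch(DisasContext *ctx, TCGv_i64 val, TCGv addr)
    {
        int ss_mmu_idx = ctx->mem_idx | MMU_IDX_SS_ACCESS;   /* assumed composition */

        tcg_gen_qemu_st_i64(val, addr, ss_mmu_idx, MO_TEUQ | MO_ALIGN);
    }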

Comments

Richard Henderson Aug. 7, 2024, 3:19 a.m. UTC | #1
On 8/7/24 10:06, Deepak Gupta wrote:
> @@ -1105,15 +1119,45 @@ restart:
>           return TRANSLATE_FAIL;
>       }
>   
> +    /*
> +     * When backward CFI is enabled, the R=0, W=1, X=0 reserved encoding
> +     * is used to mark Shadow Stack (SS) pages. In that case, allow normal
> +     * loads on SS pages; regular stores to SS pages raise a store access
> +     * fault rather than hitting the reserved-encoding case. Only shadow
> +     * stack stores are allowed on SS pages. Shadow stack loads and stores
> +     * on regular (non-SS) memory raise load and store/AMO access faults.
> +     * Second stage translations don't participate in Shadow Stack.
> +     */
> +    sstack_page = (cpu_get_bcfien(env) && first_stage &&
> +                  ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W));
> +
>       /* Check for reserved combinations of RWX flags. */
>       switch (pte & (PTE_R | PTE_W | PTE_X)) {
> -    case PTE_W:
>       case PTE_W | PTE_X:
> +    case PTE_W:
> +        if (sstack_page) { /* if shadow stack page, PTE_W is not reserved */
> +            break;

This is oddly written, and duplicates checks.  Better as

     switch (pte & RWX) {
     case W | X:
         return TRANSLATE_FAIL;
     case W:
         if (bcfi && first_stage) {
             sstack_page = true;
             break;
         }
         return TRANSLATE_FAIL;
     }
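
Spelled out with the full macro names from the patch, that suggestion might
look like this (a sketch of the review comment, not the committed code):

    /* Check for reserved combinations of RWX flags. */
    switch (pte & (PTE_R | PTE_W | PTE_X)) {
    case PTE_W | PTE_X:
        return TRANSLATE_FAIL;
    case PTE_W:
        /* The write-only encoding is only valid as a shadow stack page. */
        if (cpu_get_bcfien(env) && first_stage) {
            sstack_page = true;
            break;
        }
        return TRANSLATE_FAIL;
    }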


> +    /* Illegal combo of instruction type and page attribute */
> +    if (!legal_sstack_access(access_type, is_sstack_insn,
> +                            sstack_page)) {
> +        /* shadow stack instruction on an RO page: raise a page fault */
> +        if (is_sstack_insn && ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_R)) {
> +                return TRANSLATE_FAIL;
> +        }
> +        /* In all other cases it's an access fault, so raise PMP_FAIL */
> +        return TRANSLATE_PMP_FAIL;
> +    }
> +
>       int prot = 0;
> -    if (pte & PTE_R) {
> +    /*
> +     * If PTE has read bit in it or it's shadow stack page,
> +     * then reads allowed
> +     */
> +    if ((pte & PTE_R) || sstack_page) {
>           prot |= PAGE_READ;
>       }

I feel like this logic could be simplified somehow.
I'll think about it.

> @@ -1409,6 +1461,11 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>       qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
>                     __func__, address, access_type, mmu_idx);
>   
> +    /* If shadow stack instruction initiated this access, treat it as store */
> +    if (mmu_idx & MMU_IDX_SS_ACCESS) {
> +        access_type = MMU_DATA_STORE;
> +    }

I know you're trying to massage the fault type, but I think this is the wrong place.


r~
Deepak Gupta Aug. 9, 2024, 6:55 p.m. UTC | #2
On Wed, Aug 07, 2024 at 01:19:55PM +1000, Richard Henderson wrote:
>On 8/7/24 10:06, Deepak Gupta wrote:
>>      int prot = 0;
>>-    if (pte & PTE_R) {
>>+    /*
>>+     * If PTE has read bit in it or it's shadow stack page,
>>+     * then reads allowed
>>+     */
>>+    if ((pte & PTE_R) || sstack_page) {
>>          prot |= PAGE_READ;
>>      }
>
>I feel like this logic could be simplified somehow.
>I'll think about it.

Ok let me know.

>
>>@@ -1409,6 +1461,11 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>>      qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
>>                    __func__, address, access_type, mmu_idx);
>>+    /* If shadow stack instruction initiated this access, treat it as store */
>>+    if (mmu_idx & MMU_IDX_SS_ACCESS) {
>>+        access_type = MMU_DATA_STORE;
>>+    }
>
>I know you're trying to massage the fault type, but I think this is the wrong place.

Is it okay if I add an `mmu_idx` argument to `raise_mmu_exception`?
Inside `raise_mmu_exception`, based on `mmu_idx == shadow stack index`, I can then convert
a fault due to access_type=MMU_DATA_LOAD into a store page fault.

>
>
>r~
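
For illustration, the conversion Deepak describes could be a small helper used
by `raise_mmu_exception` (a hypothetical sketch; the helper name and exact
placement are assumptions, not what was merged):

    /* Hypothetical sketch: accesses that came in through the shadow stack
     * mmu index should report store/AMO faults even when the underlying
     * access was a load. */
    static MMUAccessType ss_adjust_fault_type(int mmu_idx, MMUAccessType access_type)
    {
        if ((mmu_idx & MMU_IDX_SS_ACCESS) && access_type == MMU_DATA_LOAD) {
            return MMU_DATA_STORE;
        }
        return access_type;
    }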
Richard Henderson Aug. 11, 2024, 10:23 p.m. UTC | #3
On 8/10/24 04:55, Deepak Gupta wrote:
> On Wed, Aug 07, 2024 at 01:19:55PM +1000, Richard Henderson wrote:
>> On 8/7/24 10:06, Deepak Gupta wrote:
>>>      int prot = 0;
>>> -    if (pte & PTE_R) {
>>> +    /*
>>> +     * If PTE has read bit in it or it's shadow stack page,
>>> +     * then reads allowed
>>> +     */
>>> +    if ((pte & PTE_R) || sstack_page) {
>>>          prot |= PAGE_READ;
>>>      }
>>
>> I feel like this logic could be simplified somehow.
>> I'll think about it.
> 
> Ok let me know.
> 
>>
>>> @@ -1409,6 +1461,11 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
>>>      qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
>>>                    __func__, address, access_type, mmu_idx);
>>> +    /* If shadow stack instruction initiated this access, treat it as store */
>>> +    if (mmu_idx & MMU_IDX_SS_ACCESS) {
>>> +        access_type = MMU_DATA_STORE;
>>> +    }
>>
>> I know you're trying to massage the fault type, but I think this is the wrong place.
> 
> Is it okay if I add an `mmu_idx` argument to `raise_mmu_exception`?
> Inside `raise_mmu_exception`, based on `mmu_idx == shadow stack index`, I can then convert
> a fault due to access_type=MMU_DATA_LOAD into a store page fault.

We have other places where we mis-categorize amo instructions and raise the wrong fault;
I think particularly without smp, when we implement amo without host atomic operations. 
We should perhaps come up with a general purpose solution.

For instance, set TARGET_INSN_START_EXTRA_WORDS to 2.  In the second extra unwind word, 
set bit 0 to 1 if the instruction should raise STORE_AMO on a read fault.  Handle this in 
raise_mmu_exception, which would then need to perform the restore and exit itself.


r~
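
As a rough illustration of that general-purpose idea, the translator side could
record the extra unwind word roughly as below (purely a sketch: the field name
is invented, the existing first extra word is shown as 0 for brevity, and the
consuming side in raise_mmu_exception is omitted):

    /* target/riscv/cpu-param.h (sketch) */
    #define TARGET_INSN_START_EXTRA_WORDS 2

    /* target/riscv/translate.c (sketch) */
    static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
    {
        DisasContext *ctx = container_of(dcbase, DisasContext, base);

        /*
         * Second extra word: bit 0 set when a read fault on this
         * instruction should be reported as a store/AMO fault
         * (e.g. AMOs implemented without host atomics, shadow stack
         * accesses).
         */
        tcg_gen_insn_start(ctx->base.pc_next, 0, ctx->report_read_as_store);
    }

The fault path would then need to restore state first, so the unwound flag is
available when choosing the exception cause.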

Patch

diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index fb6c0d4e1f..5d5da8dce1 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -820,6 +820,18 @@  void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
     env->load_res = -1;
 }
 
+static bool legal_sstack_access(int access_type, bool sstack_inst,
+                                bool sstack_attribute)
+{
+    /*
+     * Read/write/execute permissions are checked as usual. Shadow stack
+     * enforcement only requires that (1) the instruction type matches the
+     * page attribute, except that (2) a non-SS load to an SS page is allowed.
+     */
+    return (sstack_inst == sstack_attribute) ||
+        ((access_type == MMU_DATA_LOAD) && sstack_attribute);
+}
+
 /*
  * get_physical_address_pmp - check PMP permission for this physical address
  *
@@ -897,6 +909,8 @@  static int get_physical_address(CPURISCVState *env, hwaddr *physical,
     hwaddr ppn;
     int napot_bits = 0;
     target_ulong napot_mask;
+    bool is_sstack_insn = ((mmu_idx & MMU_IDX_SS_ACCESS) == MMU_IDX_SS_ACCESS);
+    bool sstack_page = false;
 
     /*
      * Check if we should use the background registers for the two
@@ -1105,15 +1119,45 @@  restart:
         return TRANSLATE_FAIL;
     }
 
+    /*
+     * When backward CFI is enabled, the R=0, W=1, X=0 reserved encoding
+     * is used to mark Shadow Stack (SS) pages. In that case, allow normal
+     * loads on SS pages; regular stores to SS pages raise a store access
+     * fault rather than hitting the reserved-encoding case. Only shadow
+     * stack stores are allowed on SS pages. Shadow stack loads and stores
+     * on regular (non-SS) memory raise load and store/AMO access faults.
+     * Second stage translations don't participate in Shadow Stack.
+     */
+    sstack_page = (cpu_get_bcfien(env) && first_stage &&
+                  ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W));
+
     /* Check for reserved combinations of RWX flags. */
     switch (pte & (PTE_R | PTE_W | PTE_X)) {
-    case PTE_W:
     case PTE_W | PTE_X:
+    case PTE_W:
+        if (sstack_page) { /* if shadow stack page, PTE_W is not reserved */
+            break;
+        }
         return TRANSLATE_FAIL;
     }
 
+    /* Illegal combo of instruction type and page attribute */
+    if (!legal_sstack_access(access_type, is_sstack_insn,
+                            sstack_page)) {
+        /* shadow stack instruction on an RO page: raise a page fault */
+        if (is_sstack_insn && ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_R)) {
+                return TRANSLATE_FAIL;
+        }
+        /* In all other cases it's an access fault, so raise PMP_FAIL */
+        return TRANSLATE_PMP_FAIL;
+    }
+
     int prot = 0;
-    if (pte & PTE_R) {
+    /*
+     * If PTE has read bit in it or it's shadow stack page,
+     * then reads allowed
+     */
+    if ((pte & PTE_R) || sstack_page) {
         prot |= PAGE_READ;
     }
     if (pte & PTE_W) {
@@ -1351,9 +1395,17 @@  void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
         break;
     case MMU_DATA_LOAD:
         cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+        /* shadow stack misaligned accesses are access faults */
+        if (mmu_idx & MMU_IDX_SS_ACCESS) {
+            cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+        }
         break;
     case MMU_DATA_STORE:
         cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+        /* shadow stack misaligned accesses are access faults */
+        if (mmu_idx & MMU_IDX_SS_ACCESS) {
+            cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+        }
         break;
     default:
         g_assert_not_reached();
@@ -1409,6 +1461,11 @@  bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                   __func__, address, access_type, mmu_idx);
 
+    /* If shadow stack instruction initiated this access, treat it as store */
+    if (mmu_idx & MMU_IDX_SS_ACCESS) {
+        access_type = MMU_DATA_STORE;
+    }
+
     pmu_tlb_fill_incr_ctr(cpu, access_type);
     if (two_stage_lookup) {
         /* Two stage lookup */
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 0ac17bc5ad..dad0657c80 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -30,12 +30,15 @@ 
  *  - U+2STAGE          0b100
  *  - S+2STAGE          0b101
  *  - S+SUM+2STAGE      0b110
+ *  - Shadow stack+U    0b1000
+ *  - Shadow stack+S    0b1001
  */
 #define MMUIdx_U            0
 #define MMUIdx_S            1
 #define MMUIdx_S_SUM        2
 #define MMUIdx_M            3
 #define MMU_2STAGE_BIT      (1 << 2)
+#define MMU_IDX_SS_ACCESS   (1 << 3)
 
 static inline int mmuidx_priv(int mmu_idx)
 {