@@ -553,6 +553,29 @@ static inline int pte_huge(pte_t pte)
return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}
+#ifdef CONFIG_RISCV_USE_SW_PAGE
+static inline int pte_dirty(pte_t pte)
+{
+ unsigned int i;
+
+ for (i = 0; i < HW_PAGES_PER_PAGE; i++)
+ if (pte.ptes[i] & _PAGE_DIRTY)
+ return 1;
+
+ return 0;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ unsigned int i;
+
+ for (i = 0; i < HW_PAGES_PER_PAGE; i++)
+ if (pte.ptes[i] & _PAGE_ACCESSED)
+ return 1;
+
+ return 0;
+}
+#else
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & _PAGE_DIRTY;
@@ -562,6 +585,7 @@ static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
+#endif /* CONFIG_RISCV_USE_SW_PAGE */
static inline int pte_special(pte_t pte)
{
A CPU that supports only a 4K MMU usually updates the access/dirty bits at the 4K pte level. As each software page can contain multiple 4K hardware pages, we need to traverse all mapping entries to check whether any corresponding 4K page is accessed or dirty in the pte_dirty/pte_young functions. Signed-off-by: Xu Lu <luxu.kernel@bytedance.com> --- arch/riscv/include/asm/pgtable.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+)