| Message ID | 20230328095807.7014-7-songmuchun@bytedance.com (mailing list archive) |
|---|---|
| State | New |
| Series | Simplify kfence code |
On Tue, 28 Mar 2023 at 11:59, 'Muchun Song' via kasan-dev <kasan-dev@googlegroups.com> wrote:
>
> Replace ALIGN_DOWN(x, PAGE_SIZE) with PAGE_ALIGN_DOWN(x) to simplify
> the code a bit.
>
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>

Reviewed-by: Marco Elver <elver@google.com>

> ---
>  mm/kfence/core.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
```diff
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index f205b860f460..dbfb79a4d624 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -230,17 +230,17 @@ static bool alloc_covered_contains(u32 alloc_stack_hash)
 
 static inline void kfence_protect(unsigned long addr)
 {
-	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
+	kfence_protect_page(PAGE_ALIGN_DOWN(addr), true);
 }
 
 static inline void kfence_unprotect(unsigned long addr)
 {
-	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
+	kfence_protect_page(PAGE_ALIGN_DOWN(addr), false);
 }
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
-	return ALIGN_DOWN(meta->addr, PAGE_SIZE);
+	return PAGE_ALIGN_DOWN(meta->addr);
 }
 
 /*
@@ -308,7 +308,7 @@ static inline bool check_canary_byte(u8 *addr)
 /* __always_inline this to ensure we won't do an indirect call to fn. */
 static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
 {
-	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
+	const unsigned long pageaddr = PAGE_ALIGN_DOWN(meta->addr);
 	unsigned long addr;
 
 	/*
@@ -455,7 +455,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 	}
 
 	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
-	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
+	kcsan_begin_scoped_access((void *)PAGE_ALIGN_DOWN((unsigned long)addr), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);
 
@@ -464,7 +464,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 
 	/* Restore page protection if there was an OOB access. */
 	if (meta->unprotected_page) {
-		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
+		memzero_explicit((void *)PAGE_ALIGN_DOWN(meta->unprotected_page), PAGE_SIZE);
 		kfence_protect(meta->unprotected_page);
 		meta->unprotected_page = 0;
 	}
```
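For reference, the substitution is a pure spelling change: PAGE_ALIGN_DOWN() is defined in include/linux/mm.h as ALIGN_DOWN() with PAGE_SIZE already filled in, so the patch should generate identical code. The definitions below are paraphrased from memory of include/uapi/linux/const.h, include/linux/align.h and include/linux/mm.h, not quoted from the exact tree this series is based on:

```c
/* Paraphrased; see include/uapi/linux/const.h and include/linux/align.h. */
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a)		__ALIGN_KERNEL((x) - ((a) - 1), (a))

/* Paraphrased; see include/linux/mm.h. */
#define PAGE_ALIGN(addr)	ALIGN(addr, PAGE_SIZE)		/* round up to the next page boundary */
#define PAGE_ALIGN_DOWN(addr)	ALIGN_DOWN(addr, PAGE_SIZE)	/* round down to the page base */
```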
Replace ALIGN_DOWN(x, PAGE_SIZE) with PAGE_ALIGN_DOWN(x) to simplify
the code a bit.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/kfence/core.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
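Not part of the series, but a minimal standalone sketch (the kernel macros are re-declared locally and PAGE_SIZE is assumed to be 4 KiB) showing that both spellings compute the same page base address:

```c
/*
 * Standalone userspace sketch: mirrors the kernel's ALIGN_DOWN() and
 * PAGE_ALIGN_DOWN() macros (re-declared here, PAGE_SIZE assumed 4 KiB)
 * to illustrate that the two spellings are interchangeable.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a) - 1)
#define ALIGN_DOWN(x, a)	__ALIGN((x) - ((a) - 1), (a))
#define PAGE_ALIGN_DOWN(addr)	ALIGN_DOWN(addr, PAGE_SIZE)

int main(void)
{
	unsigned long samples[] = { 0x0, 0x1, 0xfff, 0x1000, 0x1234f000, 0x1234ffff };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long old_way = ALIGN_DOWN(samples[i], PAGE_SIZE);
		unsigned long new_way = PAGE_ALIGN_DOWN(samples[i]);

		/* Both spellings round down to the base of the containing page. */
		assert(old_way == new_way);
		printf("%#lx -> %#lx\n", samples[i], new_way);
	}
	return 0;
}
```

Expanding the macros, ALIGN_DOWN(x, PAGE_SIZE) reduces to x & ~(PAGE_SIZE - 1), i.e. the address of the page containing x, which is exactly what kfence_protect() and metadata_to_pageaddr() need.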