Message ID | 20230824133135.1176709-2-puranjay12@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Series | bpf, riscv: use BPF prog pack allocator in BPF JIT |
On Thu, Aug 24, 2023 at 6:31 AM Puranjay Mohan <puranjay12@gmail.com> wrote:
>
> The patch_insn_write() function currently doesn't work for multiple
> pages of instructions, therefore patch_text_nosync() will fail with a
> page fault if called with lengths spanning multiple pages.
>
> This commit extends the patch_insn_write() function to support multiple
> pages by copying at max 2 pages at a time in a loop. This implementation
> is similar to the text_poke_copy() function of x86.
>
> Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
> Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
> ---
>  arch/riscv/kernel/patch.c | 39 ++++++++++++++++++++++++++++++++++-----
>  1 file changed, 34 insertions(+), 5 deletions(-)
>
> diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
> index 575e71d6c8ae..465b2eebbc37 100644
> --- a/arch/riscv/kernel/patch.c
> +++ b/arch/riscv/kernel/patch.c
> @@ -53,12 +53,18 @@ static void patch_unmap(int fixmap)
>  }
>  NOKPROBE_SYMBOL(patch_unmap);
>
> -static int patch_insn_write(void *addr, const void *insn, size_t len)
> +static int __patch_insn_write(void *addr, const void *insn, size_t len)
>  {
>         void *waddr = addr;
>         bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
>         int ret;
>
> +       /*
> +        * Only two pages can be mapped at a time for writing.
> +        */
> +       if (len > 2 * PAGE_SIZE)
> +               return -EINVAL;

This check cannot guarantee __patch_insn_write touches at most two pages.
Maybe use

        if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
                return -EINVAL;

?

Thanks,
Song

>         /*
>          * Before reaching here, it was expected to lock the text_mutex
>          * already, so we don't need to give another lock here and could
> @@ -74,7 +80,7 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
>         lockdep_assert_held(&text_mutex);
>
>         if (across_pages)
> -               patch_map(addr + len, FIX_TEXT_POKE1);
> +               patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
>
>         waddr = patch_map(addr, FIX_TEXT_POKE0);
>
> @@ -87,15 +93,38 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
>
>         return ret;
>  }
> -NOKPROBE_SYMBOL(patch_insn_write);
> +NOKPROBE_SYMBOL(__patch_insn_write);
>  #else
> -static int patch_insn_write(void *addr, const void *insn, size_t len)
> +static int __patch_insn_write(void *addr, const void *insn, size_t len)
>  {
>         return copy_to_kernel_nofault(addr, insn, len);
>  }
> -NOKPROBE_SYMBOL(patch_insn_write);
> +NOKPROBE_SYMBOL(__patch_insn_write);
>  #endif /* CONFIG_MMU */
>
> +static int patch_insn_write(void *addr, const void *insn, size_t len)
> +{
> +       size_t patched = 0;
> +       size_t size;
> +       int ret = 0;
> +
> +       /*
> +        * Copy the instructions to the destination address, two pages at a time
> +        * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
> +        */
> +       while (patched < len && !ret) {
> +               size = min_t(size_t,
> +                            PAGE_SIZE * 2 - offset_in_page(addr + patched),
> +                            len - patched);
> +               ret = __patch_insn_write(addr + patched, insn + patched, size);
> +
> +               patched += size;
> +       }
> +
> +       return ret;
> +}
> +NOKPROBE_SYMBOL(patch_insn_write);
> +
>  int patch_text_nosync(void *addr, const void *insns, size_t len)
>  {
>         u32 *tp = addr;
> --
> 2.39.2
>
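To make the boundary case in this review concrete, here is a small standalone userspace sketch (assuming 4 KiB pages; the address is made up, and none of this is part of the kernel patch itself). It shows that a write of exactly 2 * PAGE_SIZE starting mid-page passes the original "len > 2 * PAGE_SIZE" check yet touches three pages, while the suggested "len + offset_in_page(addr)" check rejects it:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL            /* assumed 4 KiB pages */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Same arithmetic as the kernel's offset_in_page(). */
    static size_t offset_in_page(uintptr_t addr)
    {
            return addr & ~PAGE_MASK;
    }

    /* Number of distinct pages a write of @len bytes at @addr touches. */
    static size_t pages_touched(uintptr_t addr, size_t len)
    {
            uintptr_t first = addr & PAGE_MASK;
            uintptr_t last = (addr + len - 1) & PAGE_MASK;

            return (last - first) / PAGE_SIZE + 1;
    }

    int main(void)
    {
            uintptr_t addr = 0x2800;    /* hypothetical address, 0x800 bytes into a page */
            size_t len = 2 * PAGE_SIZE;

            /* Accepted by the original check ... */
            assert(!(len > 2 * PAGE_SIZE));
            /* ... but the write actually spans three pages ... */
            printf("pages touched: %zu\n", pages_touched(addr, len));
            /* ... and the suggested check rejects it. */
            assert(len + offset_in_page(addr) > 2 * PAGE_SIZE);

            return 0;
    }

With the offset folded into the check, any write that passes it fits within the two FIX_TEXT_POKE fixmap slots.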
Hi Song,

On Thu, Aug 24, 2023 at 11:57 PM Song Liu <song@kernel.org> wrote:
>
> On Thu, Aug 24, 2023 at 6:31 AM Puranjay Mohan <puranjay12@gmail.com> wrote:
> >
> > The patch_insn_write() function currently doesn't work for multiple
> > pages of instructions, therefore patch_text_nosync() will fail with a
> > page fault if called with lengths spanning multiple pages.
> >
> > This commit extends the patch_insn_write() function to support multiple
> > pages by copying at max 2 pages at a time in a loop. This implementation
> > is similar to the text_poke_copy() function of x86.
> >
> > Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
> > Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
> > ---
> >  arch/riscv/kernel/patch.c | 39 ++++++++++++++++++++++++++++++++++-----
> >  1 file changed, 34 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
> > index 575e71d6c8ae..465b2eebbc37 100644
> > --- a/arch/riscv/kernel/patch.c
> > +++ b/arch/riscv/kernel/patch.c
> > @@ -53,12 +53,18 @@ static void patch_unmap(int fixmap)
> >  }
> >  NOKPROBE_SYMBOL(patch_unmap);
> >
> > -static int patch_insn_write(void *addr, const void *insn, size_t len)
> > +static int __patch_insn_write(void *addr, const void *insn, size_t len)
> >  {
> >         void *waddr = addr;
> >         bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
> >         int ret;
> >
> > +       /*
> > +        * Only two pages can be mapped at a time for writing.
> > +        */
> > +       if (len > 2 * PAGE_SIZE)
> > +               return -EINVAL;
>
> This check cannot guarantee __patch_insn_write touches at most two pages.

Yes, I just realised this can span 3 pages if len = 2 * PAGE_SIZE and
offset_in_page(addr) > 0.

> Maybe use
>
>         if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
>                 return -EINVAL;
> ?

Will fix it in the next version.

>
> Thanks,
> Song
>
> >         /*
> >          * Before reaching here, it was expected to lock the text_mutex
> >          * already, so we don't need to give another lock here and could
> > @@ -74,7 +80,7 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
> >         lockdep_assert_held(&text_mutex);
> >
> >         if (across_pages)
> > -               patch_map(addr + len, FIX_TEXT_POKE1);
> > +               patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
> >
> >         waddr = patch_map(addr, FIX_TEXT_POKE0);
> >
> > @@ -87,15 +93,38 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
> >
> >         return ret;
> >  }
> > -NOKPROBE_SYMBOL(patch_insn_write);
> > +NOKPROBE_SYMBOL(__patch_insn_write);
> >  #else
> > -static int patch_insn_write(void *addr, const void *insn, size_t len)
> > +static int __patch_insn_write(void *addr, const void *insn, size_t len)
> >  {
> >         return copy_to_kernel_nofault(addr, insn, len);
> >  }
> > -NOKPROBE_SYMBOL(patch_insn_write);
> > +NOKPROBE_SYMBOL(__patch_insn_write);
> >  #endif /* CONFIG_MMU */
> >
> > +static int patch_insn_write(void *addr, const void *insn, size_t len)
> > +{
> > +       size_t patched = 0;
> > +       size_t size;
> > +       int ret = 0;
> > +
> > +       /*
> > +        * Copy the instructions to the destination address, two pages at a time
> > +        * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
> > +        */
> > +       while (patched < len && !ret) {
> > +               size = min_t(size_t,
> > +                            PAGE_SIZE * 2 - offset_in_page(addr + patched),
> > +                            len - patched);
> > +               ret = __patch_insn_write(addr + patched, insn + patched, size);
> > +
> > +               patched += size;
> > +       }
> > +
> > +       return ret;
> > +}
> > +NOKPROBE_SYMBOL(patch_insn_write);
> > +
> >  int patch_text_nosync(void *addr, const void *insns, size_t len)
> >  {
> >         u32 *tp = addr;
> > --
> > 2.39.2
> >

Thanks,
Puranjay
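For reference, a minimal userspace simulation of the chunking loop in the new patch_insn_write() wrapper (assuming 4 KiB pages; __patch_insn_write() is replaced here by a stub that only validates and reports each chunk, and the kernel's min_t() by a plain helper). It illustrates that every chunk handed down stays within two pages even for an unaligned start address and a multi-page length:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL                 /* assumed 4 KiB pages */

    static size_t offset_in_page(uintptr_t addr)
    {
            return addr & (PAGE_SIZE - 1);
    }

    static size_t min_size(size_t a, size_t b)
    {
            return a < b ? a : b;
    }

    /* Stand-in for __patch_insn_write(): only checks and reports each chunk. */
    static int fake_patch_insn_write(uintptr_t addr, size_t len)
    {
            if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
                    return -1;               /* the kernel would return -EINVAL */
            printf("chunk at page offset 0x%zx, %zu bytes\n",
                   offset_in_page(addr), len);
            return 0;
    }

    /* Same loop structure as the proposed patch_insn_write() wrapper. */
    static int chunked_write(uintptr_t addr, size_t len)
    {
            size_t patched = 0;
            size_t size;
            int ret = 0;

            while (patched < len && !ret) {
                    size = min_size(PAGE_SIZE * 2 - offset_in_page(addr + patched),
                                    len - patched);
                    ret = fake_patch_insn_write(addr + patched, size);
                    patched += size;
            }

            return ret;
    }

    int main(void)
    {
            /* Hypothetical: five pages' worth of instructions at a mid-page address. */
            return chunked_write(0x2800, 5 * PAGE_SIZE);
    }

The first chunk is shortened so that it ends on a page boundary, which is what the min_t() expression in the patch achieves; every following chunk then starts page-aligned and covers at most two pages.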
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 575e71d6c8ae..465b2eebbc37 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -53,12 +53,18 @@ static void patch_unmap(int fixmap)
 }
 NOKPROBE_SYMBOL(patch_unmap);
 
-static int patch_insn_write(void *addr, const void *insn, size_t len)
+static int __patch_insn_write(void *addr, const void *insn, size_t len)
 {
        void *waddr = addr;
        bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
        int ret;
 
+       /*
+        * Only two pages can be mapped at a time for writing.
+        */
+       if (len > 2 * PAGE_SIZE)
+               return -EINVAL;
+
        /*
         * Before reaching here, it was expected to lock the text_mutex
         * already, so we don't need to give another lock here and could
@@ -74,7 +80,7 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
        lockdep_assert_held(&text_mutex);
 
        if (across_pages)
-               patch_map(addr + len, FIX_TEXT_POKE1);
+               patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
 
        waddr = patch_map(addr, FIX_TEXT_POKE0);
 
@@ -87,15 +93,38 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
 
        return ret;
 }
-NOKPROBE_SYMBOL(patch_insn_write);
+NOKPROBE_SYMBOL(__patch_insn_write);
 #else
-static int patch_insn_write(void *addr, const void *insn, size_t len)
+static int __patch_insn_write(void *addr, const void *insn, size_t len)
 {
        return copy_to_kernel_nofault(addr, insn, len);
 }
-NOKPROBE_SYMBOL(patch_insn_write);
+NOKPROBE_SYMBOL(__patch_insn_write);
 #endif /* CONFIG_MMU */
 
+static int patch_insn_write(void *addr, const void *insn, size_t len)
+{
+       size_t patched = 0;
+       size_t size;
+       int ret = 0;
+
+       /*
+        * Copy the instructions to the destination address, two pages at a time
+        * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
+        */
+       while (patched < len && !ret) {
+               size = min_t(size_t,
+                            PAGE_SIZE * 2 - offset_in_page(addr + patched),
+                            len - patched);
+               ret = __patch_insn_write(addr + patched, insn + patched, size);
+
+               patched += size;
+       }
+
+       return ret;
+}
+NOKPROBE_SYMBOL(patch_insn_write);
+
 int patch_text_nosync(void *addr, const void *insns, size_t len)
 {
        u32 *tp = addr;