diff mbox

[v2,02/22] ARM: add self test for runtime patch mechanism

Message ID 1344648306-15619-3-git-send-email-cyril@ti.com (mailing list archive)
State New, archived
Headers show

Commit Message

Cyril Chemparathy Aug. 11, 2012, 1:24 a.m. UTC
This patch adds basic sanity tests to ensure that the instruction patching
results in valid instruction encodings.  This is done by verifying the output
of the patch process against a vector of assembler generated instructions at
init time.

Signed-off-by: Cyril Chemparathy <cyril@ti.com>
---
 arch/arm/Kconfig                |   12 ++++++++++++
 arch/arm/kernel/runtime-patch.c |   41 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)

Comments

Nicolas Pitre Aug. 12, 2012, 2:35 a.m. UTC | #1
On Fri, 10 Aug 2012, Cyril Chemparathy wrote:

> This patch adds basic sanity tests to ensure that the instruction patching
> results in valid instruction encodings.  This is done by verifying the output
> of the patch process against a vector of assembler generated instructions at
> init time.
> 
> Signed-off-by: Cyril Chemparathy <cyril@ti.com>
> ---
>  arch/arm/Kconfig                |   12 ++++++++++++
>  arch/arm/kernel/runtime-patch.c |   41 +++++++++++++++++++++++++++++++++++++++
>  2 files changed, 53 insertions(+)
> 
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index d0a04ad..7e552dc 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -211,6 +211,18 @@ config ARM_PATCH_PHYS_VIRT
>  	  this feature (eg, building a kernel for a single machine) and
>  	  you need to shrink the kernel to the minimal size.
>  
> +config ARM_RUNTIME_PATCH_TEST
> +	bool "Self test runtime patching mechanism" if ARM_RUNTIME_PATCH
> +	default y
> +	help
> +	  Select this to enable init time self checking for the runtime kernel
> +	  patching mechanism.  This enables an ISA specific set of tests that
> +	  ensure that the instructions generated by the patch process are
> +	  consistent with those generated by the assembler at compile time.
> +
> +	  Only disable this option if you need to shrink the kernel to the
> +	  minimal size.
> +
>  config NEED_MACH_IO_H
>  	bool
>  	help
> diff --git a/arch/arm/kernel/runtime-patch.c b/arch/arm/kernel/runtime-patch.c
> index fd37a2b..c471d8c 100644
> --- a/arch/arm/kernel/runtime-patch.c
> +++ b/arch/arm/kernel/runtime-patch.c
> @@ -163,6 +163,44 @@ static int apply_patch_imm8(const struct patch_info *p)
>  	return 0;
>  }
>  
> +#ifdef CONFIG_ARM_RUNTIME_PATCH_TEST
> +static void __init __used __naked __patch_test_code_imm8(void)
> +{
> +	__asm__ __volatile__ (
> +		"	.irp	shift1, 0, 6, 12, 18\n"
> +		"	.irp	shift2, 0, 1, 2, 3, 4, 5\n"
> +		"	add     r1, r2, #(0x41 << (\\shift1 + \\shift2))\n"
> +		"	.endr\n"
> +		"	.endr\n"


Maybe adding a "add r1, r2, #0x81 << 24" here might be a good thing since 
this is the most used case but missing from the above.

> +		"	.word	0\n"
> +		: : :
> +	);
> +}
> +
> +static void __init test_patch_imm8(void)
> +{
> +	u32 test_code_addr = (u32)(&__patch_test_code_imm8);
> +	u32 *test_code = (u32 *)(test_code_addr & ~0x3);

Why this masking?  With Thumb2 you may find functions starting at 
halfword aligned addresses.  Only the LSB (indicating thumb mode) should 
be masked out.

> +	int i, ret;
> +	u32 ninsn, insn;
> +
> +	insn = test_code[0];
> +	for (i = 0; test_code[i]; i++) {
> +		ret = do_patch_imm8(insn, 0x41 << i, &ninsn);
> +		if (ret < 0)
> +			pr_err("runtime patch (imm8): failed at shift %d\n", i);
> +		else if (ninsn != test_code[i])
> +			pr_err("runtime patch (imm8): failed, need %x got %x\n",
> +			       test_code[i], ninsn);
> +	}
> +}
> +
> +static void __init runtime_patch_test(void)
> +{
> +	test_patch_imm8();
> +}
> +#endif
> +
>  int runtime_patch(const void *table, unsigned size)
>  {
>  	const struct patch_info *p = table, *end = (table + size);
> @@ -185,5 +223,8 @@ void __init runtime_patch_kernel(void)
>  	const void *start = &__runtime_patch_table_begin;
>  	const void *end   = &__runtime_patch_table_end;
>  
> +#ifdef CONFIG_ARM_RUNTIME_PATCH_TEST
> +	runtime_patch_test();
> +#endif
>  	BUG_ON(runtime_patch(start, end - start));
>  }
> -- 
> 1.7.9.5
>
Cyril Chemparathy Aug. 12, 2012, 4:32 p.m. UTC | #2
On 08/11/12 22:35, Nicolas Pitre wrote:
> On Fri, 10 Aug 2012, Cyril Chemparathy wrote:
>
>> This patch adds basic sanity tests to ensure that the instruction patching
>> results in valid instruction encodings.  This is done by verifying the output
>> of the patch process against a vector of assembler generated instructions at
>> init time.
>>
>> Signed-off-by: Cyril Chemparathy <cyril@ti.com>
>> ---
[...]
>> +	__asm__ __volatile__ (
>> +		"	.irp	shift1, 0, 6, 12, 18\n"
>> +		"	.irp	shift2, 0, 1, 2, 3, 4, 5\n"
>> +		"	add     r1, r2, #(0x41 << (\\shift1 + \\shift2))\n"
>> +		"	.endr\n"
>> +		"	.endr\n"
>
>
> Maybe adding a "add r1, r2, #0x81 << 24" here might be a good thing since
> this is the most used case but missing from the above.
>

Indeed.  I've now replaced this with something a bit more extensive. 
Does the following look better?

+struct patch_test_imm8 {
+       u16     imm;
+       u16     shift;
+       u32     insn;
+};
+
+static void __init __used __naked __patch_test_code_imm8(void)
+{
+       __asm__ __volatile__ (
+
+               /* a single test case */
+               "       .macro          test_one, imm, sft\n"
+               "       .hword          \\imm\n"
+               "       .hword          \\sft\n"
+               "       add             r1, r2, #(\\imm << \\sft)\n"
+               "       .endm\n"
+
+               /* a sequence of tests at 'inc' increments of shift */
+               "       .macro          test_seq, imm, sft, max, inc\n"
+               "       test_one        \\imm, \\sft\n"
+               "       .if             \\sft < \\max\n"
+               "       test_seq        \\imm, (\\sft + \\inc), \\max, \\inc\n"
+               "       .endif\n"
+               "       .endm\n"
+
+               /* an empty record to mark the end */
+               "       .macro          test_end\n"
+               "       .hword          0, 0\n"
+               "       .word           0\n"
+               "       .endm\n"
+
+               /* finally generate the test sequences */
+               "       test_seq        0x41, 0, 24, 1\n"
+               "       test_seq        0x81, 0, 24, 2\n"
+               "       test_end\n"
+               : : :
+       );
+}
+

[...]
>> +	u32 test_code_addr = (u32)(&__patch_test_code_imm8);
>> +	u32 *test_code = (u32 *)(test_code_addr & ~0x3);
>
> Why this masking?  With Thumb2 you may find functions starting at
> halfword aligned addresses.  Only the LSB (indicating thumb mode) should
> be masked out.
>

Fixed.  Thanks.

Thanks
-- Cyril.
Nicolas Pitre Aug. 13, 2012, 3:19 a.m. UTC | #3
On Sun, 12 Aug 2012, Cyril Chemparathy wrote:

> On 08/11/12 22:35, Nicolas Pitre wrote:
> > On Fri, 10 Aug 2012, Cyril Chemparathy wrote:
> > 
> > > This patch adds basic sanity tests to ensure that the instruction patching
> > > results in valid instruction encodings.  This is done by verifying the
> > > output
> > > of the patch process against a vector of assembler generated instructions
> > > at
> > > init time.
> > > 
> > > Signed-off-by: Cyril Chemparathy <cyril@ti.com>
> > > ---
> [...]
> > > +	__asm__ __volatile__ (
> > > +		"	.irp	shift1, 0, 6, 12, 18\n"
> > > +		"	.irp	shift2, 0, 1, 2, 3, 4, 5\n"
> > > +		"	add     r1, r2, #(0x41 << (\\shift1 + \\shift2))\n"
> > > +		"	.endr\n"
> > > +		"	.endr\n"
> > 
> > 
> > Maybe adding a "add r1, r2, #0x81 << 24" here might be a good thing since
> > this is the most used case but missing from the above.
> > 
> 
> Indeed.  I've now replaced this with something a bit more extensive. Does the
> following look better?
> 
> +struct patch_test_imm8 {
> +       u16     imm;
> +       u16     shift;
> +       u32     insn;
> +};
> +
> +static void __init __used __naked __patch_test_code_imm8(void)
> +{
> +       __asm__ __volatile__ (
> +
> +               /* a single test case */
> +               "       .macro          test_one, imm, sft\n"
> +               "       .hword          \\imm\n"
> +               "       .hword          \\sft\n"
> +               "       add             r1, r2, #(\\imm << \\sft)\n"
> +               "       .endm\n"
> +
> +               /* a sequence of tests at 'inc' increments of shift */
> +               "       .macro          test_seq, imm, sft, max, inc\n"
> +               "       test_one        \\imm, \\sft\n"
> +               "       .if             \\sft < \\max\n"
> +               "       test_seq        \\imm, (\\sft + \\inc), \\max, \\inc\n"
> +               "       .endif\n"
> +               "       .endm\n"
> +
> +               /* an empty record to mark the end */
> +               "       .macro          test_end\n"
> +               "       .hword          0, 0\n"
> +               "       .word           0\n"
> +               "       .endm\n"
> +
> +               /* finally generate the test sequences */
> +               "       test_seq        0x41, 0, 24, 1\n"
> +               "       test_seq        0x81, 0, 24, 2\n"
> +               "       test_end\n"
> +               : : :
> +       );
> +}

Yes, this is certainly quite extensive.  :-)


Nicolas
diff mbox

Patch

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d0a04ad..7e552dc 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -211,6 +211,18 @@  config ARM_PATCH_PHYS_VIRT
 	  this feature (eg, building a kernel for a single machine) and
 	  you need to shrink the kernel to the minimal size.
 
+config ARM_RUNTIME_PATCH_TEST
+	bool "Self test runtime patching mechanism" if ARM_RUNTIME_PATCH
+	default y
+	help
+	  Select this to enable init time self checking for the runtime kernel
+	  patching mechanism.  This enables an ISA specific set of tests that
+	  ensure that the instructions generated by the patch process are
+	  consistent with those generated by the assembler at compile time.
+
+	  Only disable this option if you need to shrink the kernel to the
+	  minimal size.
+
 config NEED_MACH_IO_H
 	bool
 	help
diff --git a/arch/arm/kernel/runtime-patch.c b/arch/arm/kernel/runtime-patch.c
index fd37a2b..c471d8c 100644
--- a/arch/arm/kernel/runtime-patch.c
+++ b/arch/arm/kernel/runtime-patch.c
@@ -163,6 +163,44 @@  static int apply_patch_imm8(const struct patch_info *p)
 	return 0;
 }
 
+#ifdef CONFIG_ARM_RUNTIME_PATCH_TEST
+static void __init __used __naked __patch_test_code_imm8(void)
+{
+	__asm__ __volatile__ (
+		"	.irp	shift1, 0, 6, 12, 18\n"
+		"	.irp	shift2, 0, 1, 2, 3, 4, 5\n"
+		"	add     r1, r2, #(0x41 << (\\shift1 + \\shift2))\n"
+		"	.endr\n"
+		"	.endr\n"
+		"	.word	0\n"
+		: : :
+	);
+}
+
+static void __init test_patch_imm8(void)
+{
+	u32 test_code_addr = (u32)(&__patch_test_code_imm8);
+	u32 *test_code = (u32 *)(test_code_addr & ~0x3);
+	int i, ret;
+	u32 ninsn, insn;
+
+	insn = test_code[0];
+	for (i = 0; test_code[i]; i++) {
+		ret = do_patch_imm8(insn, 0x41 << i, &ninsn);
+		if (ret < 0)
+			pr_err("runtime patch (imm8): failed at shift %d\n", i);
+		else if (ninsn != test_code[i])
+			pr_err("runtime patch (imm8): failed, need %x got %x\n",
+			       test_code[i], ninsn);
+	}
+}
+
+static void __init runtime_patch_test(void)
+{
+	test_patch_imm8();
+}
+#endif
+
 int runtime_patch(const void *table, unsigned size)
 {
 	const struct patch_info *p = table, *end = (table + size);
@@ -185,5 +223,8 @@  void __init runtime_patch_kernel(void)
 	const void *start = &__runtime_patch_table_begin;
 	const void *end   = &__runtime_patch_table_end;
 
+#ifdef CONFIG_ARM_RUNTIME_PATCH_TEST
+	runtime_patch_test();
+#endif
 	BUG_ON(runtime_patch(start, end - start));
 }