Message ID | 1405405512-4423-9-git-send-email-zlim.lnx@gmail.com (mailing list archive) |
---|---|
State | New, archived |
On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> Introduce function to generate move wide (immediate) instructions.

[...]

> +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
> +			      int imm, int shift,
> +			      enum aarch64_insn_variant variant,
> +			      enum aarch64_insn_movewide_type type)
> +{
> +	u32 insn;
> +
> +	switch (type) {
> +	case AARCH64_INSN_MOVEWIDE_ZERO:
> +		insn = aarch64_insn_get_movz_value();
> +		break;
> +	case AARCH64_INSN_MOVEWIDE_KEEP:
> +		insn = aarch64_insn_get_movk_value();
> +		break;
> +	case AARCH64_INSN_MOVEWIDE_INVERSE:
> +		insn = aarch64_insn_get_movn_value();
> +		break;
> +	default:
> +		BUG_ON(1);
> +	}
> +
> +	BUG_ON(imm < 0 || imm > 65535);

Do this check with masking instead?

> +
> +	switch (variant) {
> +	case AARCH64_INSN_VARIANT_32BIT:
> +		BUG_ON(shift != 0 && shift != 16);
> +		break;
> +	case AARCH64_INSN_VARIANT_64BIT:
> +		insn |= BIT(31);
> +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> +		       shift != 48);

Would be neater as a nested switch, perhaps? If you reorder the
outer-switch, you could probably fall-through too and combine the shift
checks.

Will
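For readers without the ARM ARM to hand, the three movewide types correspond to MOVZ, MOVK and MOVN. The fragment below is an editorial C model of what each does to a 64-bit register (illustration only, not kernel code; the helper names are made up for the sketch), which is where the ZERO/KEEP/INVERSE naming comes from:

    #include <stdint.h>

    /* MOVZ: write imm16 at the shift position and zero everything else. */
    static uint64_t movz(uint64_t imm16, unsigned int shift)
    {
            return imm16 << shift;
    }

    /* MOVK: keep all other bits, replace only the targeted 16-bit field. */
    static uint64_t movk(uint64_t old, uint64_t imm16, unsigned int shift)
    {
            return (old & ~(0xffffULL << shift)) | (imm16 << shift);
    }

    /* MOVN: bitwise inverse of the MOVZ result. */
    static uint64_t movn(uint64_t imm16, unsigned int shift)
    {
            return ~(imm16 << shift);
    }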
From: Will Deacon
...
> > +	BUG_ON(imm < 0 || imm > 65535);
>
> Do this check with masking instead?

The compiler will convert that to a single unsigned comparison.

...
> > +	BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > +	       shift != 48);

OTOH I don't think it will convert that to:
	BUG_ON(shift & ~48);

	David
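Both of David's observations are easy to check in isolation. The standalone sketch below (editorial, not part of the thread) verifies that the signed range test is the same predicate as a single unsigned comparison, and that the four explicit comparisons accept exactly the same shift values as the "shift & ~48" form; the open question in the thread is only whether the compiler performs that second rewrite by itself.

    #include <assert.h>

    int main(void)
    {
            /* Range check vs. single unsigned comparison. */
            for (int imm = -70000; imm <= 70000; imm++)
                    assert((imm < 0 || imm > 65535) ==
                           ((unsigned int)imm > 65535));

            /* Four explicit comparisons vs. masking with ~48. */
            for (int shift = 0; shift <= 64; shift++) {
                    int fourway = (shift != 0 && shift != 16 &&
                                   shift != 32 && shift != 48);
                    int masked = (shift & ~48) != 0;
                    assert(fourway == masked);
            }
            return 0;
    }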
On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > Introduce function to generate move wide (immediate) instructions.
>
> [...]
>
> > +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
> > +			      int imm, int shift,
> > +			      enum aarch64_insn_variant variant,
> > +			      enum aarch64_insn_movewide_type type)
> > +{
> > +	u32 insn;
> > +
> > +	switch (type) {
> > +	case AARCH64_INSN_MOVEWIDE_ZERO:
> > +		insn = aarch64_insn_get_movz_value();
> > +		break;
> > +	case AARCH64_INSN_MOVEWIDE_KEEP:
> > +		insn = aarch64_insn_get_movk_value();
> > +		break;
> > +	case AARCH64_INSN_MOVEWIDE_INVERSE:
> > +		insn = aarch64_insn_get_movn_value();
> > +		break;
> > +	default:
> > +		BUG_ON(1);
> > +	}
> > +
> > +	BUG_ON(imm < 0 || imm > 65535);
>
> Do this check with masking instead?

Ok, if you prefer, I can change it to:

	BUG_ON(imm & ~GENMASK(15, 0));

> > +
> > +	switch (variant) {
> > +	case AARCH64_INSN_VARIANT_32BIT:
> > +		BUG_ON(shift != 0 && shift != 16);
> > +		break;
> > +	case AARCH64_INSN_VARIANT_64BIT:
> > +		insn |= BIT(31);
> > +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > +		       shift != 48);
>
> Would be neater as a nested switch, perhaps? If you reorder the
> outer-switch, you could probably fall-through too and combine the shift
> checks.

Not sure I picture what you had in mind... I couldn't come up with a
neater version with the properties you described.

The alternative I had was using masks instead of integer values, but
one could argue that while neater, it could also be harder to read:

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~BIT(4));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= BIT(31);
		BUG_ON(shift & ~GENMASK(5, 4));
	...

>
> Will
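For reference, the kernel helpers being proposed here reduce to small constants: GENMASK(15, 0) is 0xffff, BIT(4) is 16 and GENMASK(5, 4) is 48, so the mask checks accept exactly the same shift values as the explicit comparisons. A quick standalone confirmation follows; the macro definitions are simplified stand-ins that assume a 64-bit long, not the kernel's exact ones.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's BIT()/GENMASK() (values only). */
    #define BIT(nr)         (1UL << (nr))
    #define GENMASK(h, l)   ((~0UL << (l)) & (~0UL >> (63 - (h))))

    int main(void)
    {
            printf("GENMASK(15, 0) = %#lx\n", GENMASK(15, 0)); /* 0xffff */
            printf("BIT(4)         = %lu\n", BIT(4));          /* 16 */
            printf("GENMASK(5, 4)  = %lu\n", GENMASK(5, 4));   /* 48 */

            /* The 32-bit check accepts {0, 16}; the 64-bit one {0, 16, 32, 48}. */
            for (int shift = 0; shift < 64; shift++) {
                    if (!(shift & ~BIT(4)))
                            printf("32-bit check accepts shift %d\n", shift);
                    if (!(shift & ~GENMASK(5, 4)))
                            printf("64-bit check accepts shift %d\n", shift);
            }
            return 0;
    }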
On Wed, Jul 16, 2014 at 11:04:22PM +0100, Zi Shen Lim wrote:
> On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> > On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > > Introduce function to generate move wide (immediate) instructions.
> >
> > [...]
> >
> > > +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
> > > +			      int imm, int shift,
> > > +			      enum aarch64_insn_variant variant,
> > > +			      enum aarch64_insn_movewide_type type)
> > > +{
> > > +	u32 insn;
> > > +
> > > +	switch (type) {
> > > +	case AARCH64_INSN_MOVEWIDE_ZERO:
> > > +		insn = aarch64_insn_get_movz_value();
> > > +		break;
> > > +	case AARCH64_INSN_MOVEWIDE_KEEP:
> > > +		insn = aarch64_insn_get_movk_value();
> > > +		break;
> > > +	case AARCH64_INSN_MOVEWIDE_INVERSE:
> > > +		insn = aarch64_insn_get_movn_value();
> > > +		break;
> > > +	default:
> > > +		BUG_ON(1);
> > > +	}
> > > +
> > > +	BUG_ON(imm < 0 || imm > 65535);
> >
> > Do this check with masking instead?
>
> Ok, if you prefer, I can change it to:
>
> 	BUG_ON(imm & ~GENMASK(15, 0));

Sure, that or use a named constant for the upper-bound (SZ_64K - 1).

> > > +	switch (variant) {
> > > +	case AARCH64_INSN_VARIANT_32BIT:
> > > +		BUG_ON(shift != 0 && shift != 16);
> > > +		break;
> > > +	case AARCH64_INSN_VARIANT_64BIT:
> > > +		insn |= BIT(31);
> > > +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > > +		       shift != 48);
> >
> > Would be neater as a nested switch, perhaps? If you reorder the
> > outer-switch, you could probably fall-through too and combine the shift
> > checks.
>
> Not sure I picture what you had in mind... I couldn't come up with a
> neater version with the properties you described.
>
> The alternative I had was using masks instead of integer values, but
> one could argue that while neater, it could also be harder to read:
>
> 	switch (variant) {
> 	case AARCH64_INSN_VARIANT_32BIT:
> 		BUG_ON(shift & ~BIT(4));
> 		break;
> 	case AARCH64_INSN_VARIANT_64BIT:
> 		insn |= BIT(31);
> 		BUG_ON(shift & ~GENMASK(5, 4));
> 	...

I was thinking of using nested switches, but that doesn't fall out like I
hoped. How about:

	switch (variant) {
	case AARCH64_INSN_VARIANT_64BIT:
		BUG_ON(shift != 32 && shift != 48);
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
	};

?

Will
From: Will Deacon
...
> > > > +	BUG_ON(imm < 0 || imm > 65535);
> > >
> > > Do this check with masking instead?
> >
> > Ok, if you prefer, I can change it to:
> >
> > 	BUG_ON(imm & ~GENMASK(15, 0));

Gah - then anyone reading the code has to look up another define.
There isn't a prize for the most complicated method of defining a
constant that can never change.

> Sure, that or use a named constant for the upper-bound (SZ_64K - 1).

There is nothing wrong with the original code.
Maybe use 0xffff for those people (are there any) that don't know their
powers of two.

These are strong constants, they aren't going to be changed to any other
value, and it is unlikely that anyone will want to search for their uses.

I presume that SZ_64K is defined somewhere to 0x10000u.
But IMHO using it (instead of the literal) doesn't make code any more
readable.

	David
(resending this email in case the first one got caught in your spam
filter. sorry.)

On Thu, Jul 17, 2014 at 10:41:02AM +0100, Will Deacon wrote:
> On Wed, Jul 16, 2014 at 11:04:22PM +0100, Zi Shen Lim wrote:
> > On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> > > On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > > > Introduce function to generate move wide (immediate) instructions.

[...]

> > > > +	switch (variant) {
> > > > +	case AARCH64_INSN_VARIANT_32BIT:
> > > > +		BUG_ON(shift != 0 && shift != 16);
> > > > +		break;
> > > > +	case AARCH64_INSN_VARIANT_64BIT:
> > > > +		insn |= BIT(31);
> > > > +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > > > +		       shift != 48);
> > >
> > > Would be neater as a nested switch, perhaps? If you reorder the
> > > outer-switch, you could probably fall-through too and combine the shift
> > > checks.
> >
> > Not sure I picture what you had in mind... I couldn't come up with a
> > neater version with the properties you described.
> >
> > The alternative I had was using masks instead of integer values, but
> > one could argue that while neater, it could also be harder to read:
> >
> > 	switch (variant) {
> > 	case AARCH64_INSN_VARIANT_32BIT:
> > 		BUG_ON(shift & ~BIT(4));
> > 		break;
> > 	case AARCH64_INSN_VARIANT_64BIT:
> > 		insn |= BIT(31);
> > 		BUG_ON(shift & ~GENMASK(5, 4));
> > 	...
>
> I was thinking of using nested switches, but that doesn't fall out like I
> hoped. How about:
>
> 	switch (variant) {
> 	case AARCH64_INSN_VARIANT_64BIT:
> 		BUG_ON(shift != 32 && shift != 48);

Sorry this won't work. For example, on the valid case of shift==0,
we'll barf right here - no fallthrough.

Shall we just leave the code as is? :)

> 	case AARCH64_INSN_VARIANT_32BIT:
> 		BUG_ON(shift != 0 && shift != 16);
> 	};
>
> ?
>
> Will
On Fri, Jul 18, 2014 at 06:47:22AM +0100, Z Lim wrote:
> (resending this email in case the first one got caught in your spam
> filter. sorry.)
>
> On Thu, Jul 17, 2014 at 10:41:02AM +0100, Will Deacon wrote:
> > On Wed, Jul 16, 2014 at 11:04:22PM +0100, Zi Shen Lim wrote:
> > > On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> > > > On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > > > > Introduce function to generate move wide (immediate) instructions.
> [...]
> > > > > +	switch (variant) {
> > > > > +	case AARCH64_INSN_VARIANT_32BIT:
> > > > > +		BUG_ON(shift != 0 && shift != 16);
> > > > > +		break;
> > > > > +	case AARCH64_INSN_VARIANT_64BIT:
> > > > > +		insn |= BIT(31);
> > > > > +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > > > > +		       shift != 48);
> > > >
> > > > Would be neater as a nested switch, perhaps? If you reorder the
> > > > outer-switch, you could probably fall-through too and combine the shift
> > > > checks.
> > >
> > > Not sure I picture what you had in mind... I couldn't come up with a
> > > neater version with the properties you described.
> > >
> > > The alternative I had was using masks instead of integer values, but
> > > one could argue that while neater, it could also be harder to read:
> > >
> > > 	switch (variant) {
> > > 	case AARCH64_INSN_VARIANT_32BIT:
> > > 		BUG_ON(shift & ~BIT(4));
> > > 		break;
> > > 	case AARCH64_INSN_VARIANT_64BIT:
> > > 		insn |= BIT(31);
> > > 		BUG_ON(shift & ~GENMASK(5, 4));
> > > 	...
> >
> > I was thinking of using nested switches, but that doesn't fall out like I
> > hoped. How about:
> >
> > 	switch (variant) {
> > 	case AARCH64_INSN_VARIANT_64BIT:
> > 		BUG_ON(shift != 32 && shift != 48);
>
> Sorry this won't work. For example, on the valid case of shift==0,
> we'll barf right here - no fallthrough.
>
> Shall we just leave the code as is? :)

Yeah, I'm an idiot ;)

Cheers,

Will
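For completeness: the fallthrough idea could be made to work if the 64-bit-only shift values were filtered with an if rather than a BUG_ON, as in the hypothetical rearrangement sketched below. The thread settled on keeping the patch's original form (the diff follows), so this is only an illustration of the alternative that was being discussed, not code that was merged.

    switch (variant) {
    case AARCH64_INSN_VARIANT_64BIT:
            insn |= BIT(31);
            if (shift == 32 || shift == 48)
                    break;
            /* fall through: the remaining valid 64-bit shifts are the 32-bit ones */
    case AARCH64_INSN_VARIANT_32BIT:
            BUG_ON(shift != 0 && shift != 16);
            break;
    default:
            BUG_ON(1);
    }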
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 8fd31fc..49dec28 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -172,6 +172,12 @@ enum aarch64_insn_adsb_type {
 	AARCH64_INSN_ADSB_SUB_SETFLAGS
 };
 
+enum aarch64_insn_movewide_type {
+	AARCH64_INSN_MOVEWIDE_ZERO,
+	AARCH64_INSN_MOVEWIDE_KEEP,
+	AARCH64_INSN_MOVEWIDE_INVERSE
+};
+
 enum aarch64_insn_bitfield_type {
 	AARCH64_INSN_BITFIELD_MOVE,
 	AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
@@ -194,9 +200,12 @@ __AARCH64_INSN_FUNCS(add_imm,	0x7F000000, 0x11000000)
 __AARCH64_INSN_FUNCS(adds_imm,	0x7F000000, 0x31000000)
 __AARCH64_INSN_FUNCS(sub_imm,	0x7F000000, 0x51000000)
 __AARCH64_INSN_FUNCS(subs_imm,	0x7F000000, 0x71000000)
+__AARCH64_INSN_FUNCS(movn,	0x7F800000, 0x12800000)
 __AARCH64_INSN_FUNCS(sbfm,	0x7F800000, 0x13000000)
 __AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
+__AARCH64_INSN_FUNCS(movz,	0x7F800000, 0x52800000)
 __AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
+__AARCH64_INSN_FUNCS(movk,	0x7F800000, 0x72800000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -252,6 +261,10 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
 			      int immr, int imms,
 			      enum aarch64_insn_variant variant,
 			      enum aarch64_insn_bitfield_type type);
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+			      int imm, int shift,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_movewide_type type);
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 01ed35c..1cb94b4 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -653,3 +653,46 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
 }
+
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+			      int imm, int shift,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_movewide_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_MOVEWIDE_ZERO:
+		insn = aarch64_insn_get_movz_value();
+		break;
+	case AARCH64_INSN_MOVEWIDE_KEEP:
+		insn = aarch64_insn_get_movk_value();
+		break;
+	case AARCH64_INSN_MOVEWIDE_INVERSE:
+		insn = aarch64_insn_get_movn_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	BUG_ON(imm < 0 || imm > 65535);
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		BUG_ON(shift != 0 && shift != 16);
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
+		       shift != 48);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn |= (shift >> 4) << 21;
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+}
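As an aid to reading the encoding above: BIT(31) sets the sf bit selecting the 64-bit variant, and "(shift >> 4) << 21" places shift/16 into the two-bit hw field. The comment block and hand-rolled MOVZ below summarise where the fields land, for comparison against the 0x12800000/0x52800000/0x72800000 opcode values added to insn.h (illustration only; movz_x is not a kernel function).

    /*
     * Move wide (immediate) layout:
     *   sf     [31]     0 = 32-bit (W) variant, 1 = 64-bit (X) variant
     *   opc    [30:29]  00 = MOVN, 10 = MOVZ, 11 = MOVK
     *   100101 [28:23]  fixed opcode bits
     *   hw     [22:21]  shift / 16
     *   imm16  [20:5]   the 16-bit immediate
     *   Rd     [4:0]    destination register
     */
    static unsigned int movz_x(unsigned int rd, unsigned int imm16,
                               unsigned int shift)
    {
            return (1u << 31) | (2u << 29) | (0x25u << 23) |
                   ((shift / 16) << 21) | (imm16 << 5) | rd;
    }

    /* movz_x(0, 1, 0) == 0xd2800020, i.e. "movz x0, #0x1". */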
Introduce function to generate move wide (immediate) instructions.

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 13 +++++++++++++
 arch/arm64/kernel/insn.c      | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)
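Finally, a sketch of how a caller might use the new generator, for example to load an arbitrary 64-bit constant with one MOVZ followed by three MOVKs. The helper name emit_load_imm64 and the emit() callback are hypothetical; only aarch64_insn_gen_movewide() and its enums come from the patch above.

    static void emit_load_imm64(enum aarch64_insn_register dst, u64 val,
                                void (*emit)(u32 insn))
    {
            int shift;

            /* MOVZ clears the register and writes bits 15:0. */
            emit(aarch64_insn_gen_movewide(dst, val & 0xffff, 0,
                                           AARCH64_INSN_VARIANT_64BIT,
                                           AARCH64_INSN_MOVEWIDE_ZERO));

            /* Each MOVK keeps what is already there and patches in 16 more bits. */
            for (shift = 16; shift <= 48; shift += 16)
                    emit(aarch64_insn_gen_movewide(dst, (val >> shift) & 0xffff,
                                                   shift,
                                                   AARCH64_INSN_VARIANT_64BIT,
                                                   AARCH64_INSN_MOVEWIDE_KEEP));
    }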