
[bpf-next,v3,2/4] arm64: insn: add encoders for atomic operations

Message ID: 20220129220452.194585-3-houtao1@huawei.com
State: New, archived
Series: bpf, arm64: support more atomic ops

Commit Message

Hou Tao Jan. 29, 2022, 10:04 p.m. UTC
It is a preparation patch for eBPF atomic support on arm64. eBPF needs
support for atomic[64]_fetch_add, atomic[64]_[fetch_]{and,or,xor} and
atomic[64]_{xchg|cmpxchg}. The ordering semantics of eBPF atomics are
the same as those of the implementations in the Linux kernel.

Add three helpers to support the LDCLR/LDEOR/LDSET/SWP, CAS and DMB
instructions. STADD/STCLR/STEOR/STSET are simply encoded as aliases for
LDADD/LDCLR/LDEOR/LDSET with XZR as the destination register, so no extra
helper is added. atomic_fetch_add() and other atomic ops need support for
the STLXR instruction, so extend enum aarch64_insn_ldst_type to cover it.

The LDADD/LDCLR/LDEOR/LDSET/SWP and CAS instructions are only available
when LSE atomics are enabled, so just return AARCH64_BREAK_FAULT directly
from these newly-added helpers if CONFIG_ARM64_LSE_ATOMICS is disabled.

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 arch/arm64/include/asm/insn.h |  80 +++++++++++++--
 arch/arm64/lib/insn.c         | 185 +++++++++++++++++++++++++++++++---
 2 files changed, 244 insertions(+), 21 deletions(-)
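
[Editor's note] For illustration only, not part of the patch: this is
roughly how a caller such as the arm64 BPF JIT would drive the new
encoders once later patches in the series wire them up. Register choices
are arbitrary, and the CAS operand roles are inferred from the register
fields used by the encoder in the diff below.

  u32 insn;

  /* atomic[64]_fetch_add with full ordering: LDADDAL X1, X2, [X0] */
  insn = aarch64_insn_gen_atomic_ld_op(AARCH64_INSN_REG_2,  /* Rt: old value out */
                                       AARCH64_INSN_REG_0,  /* Rn: address */
                                       AARCH64_INSN_REG_1,  /* Rs: addend */
                                       AARCH64_INSN_SIZE_64,
                                       AARCH64_INSN_MEM_ATOMIC_ADD,
                                       AARCH64_INSN_MEM_ORDER_ACQREL);

  /* plain atomic[64]_add: STADD, i.e. LDADD with XZR as destination */
  insn = aarch64_insn_gen_atomic_ld_op(AARCH64_INSN_REG_ZR,
                                       AARCH64_INSN_REG_0,
                                       AARCH64_INSN_REG_1,
                                       AARCH64_INSN_SIZE_64,
                                       AARCH64_INSN_MEM_ATOMIC_ADD,
                                       AARCH64_INSN_MEM_ORDER_NONE);

  /* atomic[64]_cmpxchg: CASAL X1, X2, [X0]; architecturally Rs is the
   * comparand and receives the old value, Rt supplies the value to store
   */
  insn = aarch64_insn_gen_cas(AARCH64_INSN_REG_2,   /* Rt */
                              AARCH64_INSN_REG_0,   /* Rn: address */
                              AARCH64_INSN_REG_1,   /* Rs */
                              AARCH64_INSN_SIZE_64,
                              AARCH64_INSN_MEM_ORDER_ACQREL);

  /* full barrier, e.g. around an LL/SC fallback */
  insn = aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH);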

Comments

Daniel Borkmann Feb. 11, 2022, 2:39 p.m. UTC | #1
On 1/29/22 11:04 PM, Hou Tao wrote:
> It is a preparation patch for eBPF atomic support on arm64.
> [...]

Hey Mark / Ard / Will / Catalin or others, could we get an Ack on patch 1 & 2
at min if it looks good to you?

Thanks a lot,
Daniel
Will Deacon Feb. 15, 2022, 5:42 p.m. UTC | #2
Hi Daniel,

On Fri, Feb 11, 2022 at 03:39:48PM +0100, Daniel Borkmann wrote:
> On 1/29/22 11:04 PM, Hou Tao wrote:
> > It is a preparation patch for eBPF atomic support on arm64.
> > [...]
> 
> Hey Mark / Ard / Will / Catalin or others, could we get an Ack on patch 1 & 2
> at min if it looks good to you?

Sorry for the delay, for some reason this series has all ended up in my
spam! I'll take a look this week. If it looks good, do you mind if I queue
those two patches in arm64 on a stable branch for you to pull as well? We've
got a few other (non-BPF) changes pending to the instruction decoder, and
I'd like to avoid conflicts if we can.

Cheers,

Will
Daniel Borkmann Feb. 16, 2022, 12:21 a.m. UTC | #3
Hi Will,

On 2/15/22 6:42 PM, Will Deacon wrote:
> On Fri, Feb 11, 2022 at 03:39:48PM +0100, Daniel Borkmann wrote:
>> On 1/29/22 11:04 PM, Hou Tao wrote:
>>> It is a preparation patch for eBPF atomic support on arm64.
>>> [...]
>>
>> Hey Mark / Ard / Will / Catalin or others, could we get an Ack on patch 1 & 2
>> at min if it looks good to you?
> 
> Sorry for the delay, for some reason this series has all ended up in my
> spam! I'll take a look this week. If it looks good, do you mind if I queue
> those two patches in arm64 on a stable branch for you to pull as well? We've
> got a few other (non-BPF) changes pending to the instruction decoder, and
> I'd like to avoid conflicts if we can.

Yes, that should be totally fine.

Thanks,
Daniel
Will Deacon Feb. 16, 2022, 5:16 p.m. UTC | #4
On Fri, Feb 11, 2022 at 03:39:48PM +0100, Daniel Borkmann wrote:
> On 1/29/22 11:04 PM, Hou Tao wrote:
> > It is a preparation patch for eBPF atomic support on arm64.
> > [...]
> 
> Hey Mark / Ard / Will / Catalin or others, could we get an Ack on patch 1 & 2
> at min if it looks good to you?

I checked the instruction encodings in patches 1 and 2 and they all look
fine to me. However, after applying those two locally I get a build failure:

  | In file included from arch/arm64/net/bpf_jit_comp.c:23:
  | arch/arm64/net/bpf_jit_comp.c: In function ‘build_insn’:
  | arch/arm64/net/bpf_jit.h:94:2: error: implicit declaration of function ‘aarch64_insn_gen_stadd’; did you mean ‘aarch64_insn_gen_adr’? [-Werror=implicit-function-declaration]
  |    94 |  aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
  |       |  ^~~~~~~~~~~~~~~~~~~~~~
  | arch/arm64/net/bpf_jit_comp.c:912:9: note: in expansion of macro ‘A64_STADD’
  |   912 |    emit(A64_STADD(isdw, reg, src), ctx);
  |       |         ^~~~~~~~~
  | cc1: some warnings being treated as errors

Will
Hou Tao Feb. 17, 2022, 1:55 a.m. UTC | #5
Hi,

On 2/17/2022 1:16 AM, Will Deacon wrote:
> On Fri, Feb 11, 2022 at 03:39:48PM +0100, Daniel Borkmann wrote:
>> On 1/29/22 11:04 PM, Hou Tao wrote:
>>> It is a preparation patch for eBPF atomic support on arm64.
>>> [...]
>> Hey Mark / Ard / Will / Catalin or others, could we get an Ack on patch 1 & 2
>> at min if it looks good to you?
> I checked the instruction encodings in patches 1 and 2 and they all look
> fine to me. However, after applying those two locally I get a build failure:
>
> [...]
Thanks for your review. The build failure is my fault: after replacing
aarch64_insn_gen_stadd() with aarch64_insn_gen_atomic_ld_op(), I updated
A64_STADD() in patch 3 instead of patch 2, and will fix it in v4. If you
are trying to test the encoders, I suggest applying patches 1-3.
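
Concretely, the fix will look roughly like this (a sketch only; the exact
form lands in v4): point A64_STADD() in arch/arm64/net/bpf_jit.h at the
new helper, with XZR as the destination so LDADD degrades to STADD:

  #define A64_STADD(sf, Rn, Rs)                                        \
          aarch64_insn_gen_atomic_ld_op(AARCH64_INSN_REG_ZR, Rn, Rs,   \
                                        A64_SIZE(sf),                  \
                                        AARCH64_INSN_MEM_ATOMIC_ADD,   \
                                        AARCH64_INSN_MEM_ORDER_NONE)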

Regards,
Tao

Patch

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 6b776c8667b2..0b6b31307e68 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -205,7 +205,9 @@  enum aarch64_insn_ldst_type {
 	AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
 	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
 	AARCH64_INSN_LDST_LOAD_EX,
+	AARCH64_INSN_LDST_LOAD_ACQ_EX,
 	AARCH64_INSN_LDST_STORE_EX,
+	AARCH64_INSN_LDST_STORE_REL_EX,
 };
 
 enum aarch64_insn_adsb_type {
@@ -280,6 +282,36 @@  enum aarch64_insn_adr_type {
 	AARCH64_INSN_ADR_TYPE_ADR,
 };
 
+enum aarch64_insn_mem_atomic_op {
+	AARCH64_INSN_MEM_ATOMIC_ADD,
+	AARCH64_INSN_MEM_ATOMIC_CLR,
+	AARCH64_INSN_MEM_ATOMIC_EOR,
+	AARCH64_INSN_MEM_ATOMIC_SET,
+	AARCH64_INSN_MEM_ATOMIC_SWP,
+};
+
+enum aarch64_insn_mem_order_type {
+	AARCH64_INSN_MEM_ORDER_NONE,
+	AARCH64_INSN_MEM_ORDER_ACQ,
+	AARCH64_INSN_MEM_ORDER_REL,
+	AARCH64_INSN_MEM_ORDER_ACQREL,
+};
+
+enum aarch64_insn_mb_type {
+	AARCH64_INSN_MB_SY,
+	AARCH64_INSN_MB_ST,
+	AARCH64_INSN_MB_LD,
+	AARCH64_INSN_MB_ISH,
+	AARCH64_INSN_MB_ISHST,
+	AARCH64_INSN_MB_ISHLD,
+	AARCH64_INSN_MB_NSH,
+	AARCH64_INSN_MB_NSHST,
+	AARCH64_INSN_MB_NSHLD,
+	AARCH64_INSN_MB_OSH,
+	AARCH64_INSN_MB_OSHST,
+	AARCH64_INSN_MB_OSHLD,
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)				\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code)		\
 {									\
@@ -303,6 +335,11 @@  __AARCH64_INSN_FUNCS(store_post,	0x3FE00C00, 0x38000400)
 __AARCH64_INSN_FUNCS(load_post,	0x3FE00C00, 0x38400400)
 __AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
 __AARCH64_INSN_FUNCS(ldadd,	0x3F20FC00, 0x38200000)
+__AARCH64_INSN_FUNCS(ldclr,	0x3F20FC00, 0x38201000)
+__AARCH64_INSN_FUNCS(ldeor,	0x3F20FC00, 0x38202000)
+__AARCH64_INSN_FUNCS(ldset,	0x3F20FC00, 0x38203000)
+__AARCH64_INSN_FUNCS(swp,	0x3F20FC00, 0x38208000)
+__AARCH64_INSN_FUNCS(cas,	0x3FA07C00, 0x08A07C00)
 __AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
 __AARCH64_INSN_FUNCS(ldr_lit,	0xBF000000, 0x18000000)
 __AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
@@ -474,13 +511,6 @@  u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
 				   enum aarch64_insn_register state,
 				   enum aarch64_insn_size_type size,
 				   enum aarch64_insn_ldst_type type);
-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
-			   enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size);
-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size);
 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
 				 enum aarch64_insn_register src,
 				 int imm, enum aarch64_insn_variant variant,
@@ -541,6 +571,42 @@  u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 			      enum aarch64_insn_prfm_type type,
 			      enum aarch64_insn_prfm_target target,
 			      enum aarch64_insn_prfm_policy policy);
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
+				  enum aarch64_insn_register address,
+				  enum aarch64_insn_register value,
+				  enum aarch64_insn_size_type size,
+				  enum aarch64_insn_mem_atomic_op op,
+				  enum aarch64_insn_mem_order_type order);
+u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+			 enum aarch64_insn_register address,
+			 enum aarch64_insn_register value,
+			 enum aarch64_insn_size_type size,
+			 enum aarch64_insn_mem_order_type order);
+#else
+static inline
+u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
+				  enum aarch64_insn_register address,
+				  enum aarch64_insn_register value,
+				  enum aarch64_insn_size_type size,
+				  enum aarch64_insn_mem_atomic_op op,
+				  enum aarch64_insn_mem_order_type order)
+{
+	return AARCH64_BREAK_FAULT;
+}
+
+static inline
+u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+			 enum aarch64_insn_register address,
+			 enum aarch64_insn_register value,
+			 enum aarch64_insn_size_type size,
+			 enum aarch64_insn_mem_order_type order)
+{
+	return AARCH64_BREAK_FAULT;
+}
+#endif
+u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index fccfe363e567..bd119fde8504 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -578,10 +578,16 @@  u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
 
 	switch (type) {
 	case AARCH64_INSN_LDST_LOAD_EX:
+	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
 		insn = aarch64_insn_get_load_ex_value();
+		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
+			insn |= BIT(15);
 		break;
 	case AARCH64_INSN_LDST_STORE_EX:
+	case AARCH64_INSN_LDST_STORE_REL_EX:
 		insn = aarch64_insn_get_store_ex_value();
+		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
+			insn |= BIT(15);
 		break;
 	default:
 		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
@@ -603,12 +609,65 @@  u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
 					    state);
 }
 
-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
-			   enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
+					  u32 insn)
 {
-	u32 insn = aarch64_insn_get_ldadd_value();
+	u32 order;
+
+	switch (type) {
+	case AARCH64_INSN_MEM_ORDER_NONE:
+		order = 0;
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQ:
+		order = 2;
+		break;
+	case AARCH64_INSN_MEM_ORDER_REL:
+		order = 1;
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQREL:
+		order = 3;
+		break;
+	default:
+		pr_err("%s: unknown mem order %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn &= ~GENMASK(23, 22);
+	insn |= order << 22;
+
+	return insn;
+}
+
+u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
+				  enum aarch64_insn_register address,
+				  enum aarch64_insn_register value,
+				  enum aarch64_insn_size_type size,
+				  enum aarch64_insn_mem_atomic_op op,
+				  enum aarch64_insn_mem_order_type order)
+{
+	u32 insn;
+
+	switch (op) {
+	case AARCH64_INSN_MEM_ATOMIC_ADD:
+		insn = aarch64_insn_get_ldadd_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_CLR:
+		insn = aarch64_insn_get_ldclr_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_EOR:
+		insn = aarch64_insn_get_ldeor_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_SET:
+		insn = aarch64_insn_get_ldset_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_SWP:
+		insn = aarch64_insn_get_swp_value();
+		break;
+	default:
+		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
+		return AARCH64_BREAK_FAULT;
+	}
 
 	switch (size) {
 	case AARCH64_INSN_SIZE_32:
@@ -621,6 +680,8 @@  u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
 
 	insn = aarch64_insn_encode_ldst_size(size, insn);
 
+	insn = aarch64_insn_encode_ldst_order(order, insn);
+
 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
 					    result);
 
@@ -631,17 +692,68 @@  u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
 					    value);
 }
 
-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size)
+static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
+					 u32 insn)
 {
-	/*
-	 * STADD is simply encoded as an alias for LDADD with XZR as
-	 * the destination register.
-	 */
-	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
-				      value, size);
+	u32 order;
+
+	switch (type) {
+	case AARCH64_INSN_MEM_ORDER_NONE:
+		order = 0;
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQ:
+		order = BIT(22);
+		break;
+	case AARCH64_INSN_MEM_ORDER_REL:
+		order = BIT(15);
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQREL:
+		order = BIT(15) | BIT(22);
+		break;
+	default:
+		pr_err("%s: unknown mem order %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn &= ~(BIT(15) | BIT(22));
+	insn |= order;
+
+	return insn;
+}
+
+u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+			 enum aarch64_insn_register address,
+			 enum aarch64_insn_register value,
+			 enum aarch64_insn_size_type size,
+			 enum aarch64_insn_mem_order_type order)
+{
+	u32 insn;
+
+	switch (size) {
+	case AARCH64_INSN_SIZE_32:
+	case AARCH64_INSN_SIZE_64:
+		break;
+	default:
+		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn = aarch64_insn_get_cas_value();
+
+	insn = aarch64_insn_encode_ldst_size(size, insn);
+
+	insn = aarch64_insn_encode_cas_order(order, insn);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+					    result);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+					    address);
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+					    value);
 }
+#endif
 
 static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
 					enum aarch64_insn_prfm_target target,
@@ -1456,3 +1568,48 @@  u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
 }
+
+u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+{
+	u32 opt;
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_MB_SY:
+		opt = 0xf;
+		break;
+	case AARCH64_INSN_MB_ST:
+		opt = 0xe;
+		break;
+	case AARCH64_INSN_MB_LD:
+		opt = 0xd;
+		break;
+	case AARCH64_INSN_MB_ISH:
+		opt = 0xb;
+		break;
+	case AARCH64_INSN_MB_ISHST:
+		opt = 0xa;
+		break;
+	case AARCH64_INSN_MB_ISHLD:
+		opt = 0x9;
+		break;
+	case AARCH64_INSN_MB_NSH:
+		opt = 0x7;
+		break;
+	case AARCH64_INSN_MB_NSHST:
+		opt = 0x6;
+		break;
+	case AARCH64_INSN_MB_NSHLD:
+		opt = 0x5;
+		break;
+	default:
+		pr_err("%s: unknown dmb type %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn = aarch64_insn_get_dmb_value();
+	insn &= ~GENMASK(11, 8);
+	insn |= (opt << 8);
+
+	return insn;
+}
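
[Editor's note] A quick way to spot-check the new encoders is to compare
their output against hand-assembled values. The snippet below is not part
of the series; the expected constants were assembled independently from
the Arm ARM, and it assumes CONFIG_ARM64_LSE_ATOMICS=y so the real
encoders (not the AARCH64_BREAK_FAULT stubs) are built.

  /* DMB ISH assembles to 0xd5033bbf */
  WARN_ON(aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH) != 0xd5033bbf);

  /* LDADD W1, W2, [X3] assembles to 0xb8210062 */
  WARN_ON(aarch64_insn_gen_atomic_ld_op(AARCH64_INSN_REG_2,  /* Rt */
                                        AARCH64_INSN_REG_3,  /* Rn */
                                        AARCH64_INSN_REG_1,  /* Rs */
                                        AARCH64_INSN_SIZE_32,
                                        AARCH64_INSN_MEM_ATOMIC_ADD,
                                        AARCH64_INSN_MEM_ORDER_NONE)
          != 0xb8210062);

  /* LDADDAL W1, W2, [X3] additionally sets the A and R bits (23, 22) */
  WARN_ON(aarch64_insn_gen_atomic_ld_op(AARCH64_INSN_REG_2,
                                        AARCH64_INSN_REG_3,
                                        AARCH64_INSN_REG_1,
                                        AARCH64_INSN_SIZE_32,
                                        AARCH64_INSN_MEM_ATOMIC_ADD,
                                        AARCH64_INSN_MEM_ORDER_ACQREL)
          != 0xb8e10062);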