@@ -6,7 +6,8 @@
struct arm64_annotate {
regex_t call_insn,
- jump_insn;
+ jump_insn,
+ ldst_insn; /* load and store instruction */
};
static int arm64_mov__parse(struct arch *arch __maybe_unused,
@@ -67,6 +68,57 @@ static struct ins_ops arm64_mov_ops = {
.scnprintf = mov__scnprintf,
};
+static int arm64_ldst__parse(struct arch *arch __maybe_unused,
+ struct ins_operands *ops,
+ struct map_symbol *ms __maybe_unused,
+ struct disasm_line *dl __maybe_unused)
+{
+ char *s, *target;
+
+ /*
+ * The part starting from the memory access annotation '[' is parsed
+ * as 'target', while the part before it is parsed as 'source'.
+ */
+ target = s = strchr(ops->raw, '[');
+ if (!s)
+ return -1;
+
+ while (s > ops->raw && *s != ',')
+ --s;
+
+ if (s == ops->raw)
+ return -1;
+
+ *s = '\0';
+ ops->source.raw = strdup(ops->raw);
+
+ *s = ',';
+ if (!ops->source.raw)
+ return -1;
+
+ ops->target.raw = strdup(target);
+ if (!ops->target.raw) {
+ zfree(&ops->source.raw);
+ return -1;
+ }
+ ops->target.mem_ref = true;
+
+ return 0;
+}
+
+static int ldst__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
+ ops->source.name ?: ops->source.raw,
+ ops->target.name ?: ops->target.raw);
+}
+
+static struct ins_ops arm64_ldst_ops = {
+ .parse = arm64_ldst__parse,
+ .scnprintf = ldst__scnprintf,
+};
+
static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const char *name)
{
struct arm64_annotate *arm = arch->priv;
@@ -77,6 +129,8 @@ static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const
ops = &jump_ops;
else if (!regexec(&arm->call_insn, name, 2, match, 0))
ops = &call_ops;
+ else if (!regexec(&arm->ldst_insn, name, 2, match, 0))
+ ops = &arm64_ldst_ops;
else if (!strcmp(name, "ret"))
ops = &ret_ops;
else
@@ -107,6 +161,15 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
REG_EXTENDED);
if (err)
goto out_free_call;
+ /*
+ * The ARM64 architecture has many variants of load/store instructions.
+ * It is quite challenging to match all of them completely. Here, we
+ * only match the prefixes of these instructions.
+ */
+ err = regcomp(&arm->ldst_insn, "^(ld|st|cas|prf|swp)",
+ REG_EXTENDED);
+ if (err)
+ goto out_free_jump;
arch->initialized = true;
arch->priv = arm;
@@ -117,6 +180,8 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
arch->e_flags = 0;
return 0;
+out_free_jump:
+ regfree(&arm->jump_insn);
out_free_call:
regfree(&arm->call_insn);
out_free_arm:
Add ldst_ops to handle load and store instructions in order to parse the data types and offsets associated with PMU events for memory access instructions. There are many variants of load and store instructions in ARM64, making it difficult to match all of these instruction names completely. Therefore, only the instruction prefixes are matched. The prefix 'ld|st' covers most of the memory access instructions, 'cas|swp' matches atomic instructions, and 'prf' matches memory prefetch instructions. Signed-off-by: Li Huafei <lihuafei1@huawei.com> --- tools/perf/arch/arm64/annotate/instructions.c | 67 ++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-)