@@ -283,14 +283,18 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
uint32_t vta = vext_vta(desc);
+ uint32_t vma = vext_vma(desc);

for (i = env->vstart; i < env->vl; i++, env->vstart++) {
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
-
k = 0;
while (k < nf) {
+ if (!vm && !vext_elem_mask(v0, i)) {
+ /* set masked-off elements to 1s */
+ vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ (i + k * max_elems + 1) * esz);
+ k++;
+ continue;
+ }
target_ulong addr = base + stride * i + (k << log2_esz);
ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
k++;
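
With VMA in hand, the masked-off check moves inside the field loop: instead of skipping element i outright, each of its nf destination slots is handed to vext_set_elems_1s(), which applies the mask-agnostic policy (a masked-off element may be overwritten with all 1s rather than left undisturbed). As a rough illustration of that effect, and not the actual QEMU helper, a byte-range fill of this kind could look like the sketch below; the function name and the unconditional 0xff fill are assumptions made for the example.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustrative sketch only: mimic the effect of the
 * vext_set_elems_1s(vd, vma, start, end) calls above.  When the
 * mask-agnostic flag is set, the destination bytes [start, end) of a
 * masked-off element are filled with 1s; when it is clear, the old
 * value is kept (mask-undisturbed).
 */
static void fill_masked_off_1s(void *vd, uint32_t vma,
                               uint32_t start, uint32_t end)
{
    if (!vma || end <= start) {
        return;                 /* mask-undisturbed: leave bytes alone */
    }
    memset((uint8_t *)vd + start, 0xff, end - start);
}

int main(void)
{
    uint8_t vd[16] = { 0 };     /* pretend destination register group */
    uint32_t esz = 4, i = 2;    /* element size and a masked-off index */

    fill_masked_off_1s(vd, 1, i * esz, (i + 1) * esz);
    assert(vd[8] == 0xff && vd[11] == 0xff && vd[12] == 0);
    return 0;
}
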
@@ -482,15 +486,19 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
uint32_t vta = vext_vta(desc);
+ uint32_t vma = vext_vma(desc);

/* load bytes from guest memory */
for (i = env->vstart; i < env->vl; i++, env->vstart++) {
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
-
k = 0;
while (k < nf) {
+ if (!vm && !vext_elem_mask(v0, i)) {
+ /* set masked-off elements to 1s */
+ vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ (i + k * max_elems + 1) * esz);
+ k++;
+ continue;
+ }
abi_ptr addr = get_index_addr(base, i, vs2) + (k << log2_esz);
ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
k++;
@@ -579,6 +587,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
uint32_t vta = vext_vta(desc);
+ uint32_t vma = vext_vma(desc);
target_ulong addr, offset, remain;

/* probe every access*/
@@ -624,10 +633,14 @@ ProbeSuccess:
}
for (i = env->vstart; i < env->vl; i++) {
k = 0;
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
while (k < nf) {
+ if (!vm && !vext_elem_mask(v0, i)) {
+ /* set masked-off elements to 1s */
+ vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ (i + k * max_elems + 1) * esz);
+ k++;
+ continue;
+ }
target_ulong addr = base + ((i * nf + k) << log2_esz);
ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
k++;
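
Note the two different index spaces in the fault-only-first loop: the memory offset is (i * nf + k) << log2_esz because a unit-stride segment access keeps the nf fields of one element adjacent in memory, while the register offset is i + k * max_elems because field k of every element lands in the k-th part of the destination register group. A small standalone example of that arithmetic follows; the concrete values of nf, log2_esz and max_elems are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* example parameters only; the real values come from the descriptor */
    const uint32_t nf = 3, log2_esz = 2, max_elems = 8;

    for (uint32_t i = 0; i < 2; i++) {          /* element index */
        for (uint32_t k = 0; k < nf; k++) {     /* field (segment) index */
            uint32_t mem_off  = (i * nf + k) << log2_esz;  /* byte offset from base */
            uint32_t reg_elem = i + k * max_elems;         /* element slot in vd */
            printf("element %u, field %u -> mem base+%u, vd[%u]\n",
                   i, k, mem_off, reg_elem);
        }
    }
    return 0;
}
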
@@ -712,6 +712,7 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}
@@ -777,6 +778,7 @@ static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, 1);
/* Mask destination register is always tail-agnostic */
data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}
@@ -866,6 +868,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
@@ -996,6 +999,7 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
@@ -1114,6 +1118,7 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
return ldff_trans(a->rd, a->rs1, data, fn, s);
}
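
On the translation side the change is uniform: every load variant now copies s->vma into the VDATA word alongside LMUL, NF and VTA, which is how the helpers above get the value back out through vext_vma(desc). The sketch below shows the idea with a hand-rolled single-bit field; the bit position is invented for the example, and the real layout is whatever QEMU's VDATA field definitions specify.

#include <assert.h>
#include <stdint.h>

/* Invented bit position, for illustration only. */
#define EXAMPLE_VMA_SHIFT 9u

/* Translator side: pack the mask-agnostic bit into the descriptor word
 * (cf. FIELD_DP32(data, VDATA, VMA, s->vma) above). */
static uint32_t pack_vma(uint32_t data, uint32_t vma)
{
    return (data & ~(1u << EXAMPLE_VMA_SHIFT)) | ((vma & 1u) << EXAMPLE_VMA_SHIFT);
}

/* Helper side: pull the bit back out (what vext_vma(desc) is used for). */
static uint32_t extract_vma(uint32_t data)
{
    return (data >> EXAMPLE_VMA_SHIFT) & 1u;
}

int main(void)
{
    uint32_t data = 0;

    data = pack_vma(data, 1);       /* what the translators do */
    assert(extract_vma(data) == 1); /* what the helpers rely on */
    return 0;
}
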