@@ -171,6 +171,8 @@ DEF_HELPER_FLAGS_3(VMULOSW, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOUB, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOUH, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_FLAGS_3(VMULOUW, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVSQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
DEF_HELPER_3(vslo, void, avr, avr, avr)
DEF_HELPER_3(vsro, void, avr, avr, avr)
DEF_HELPER_3(vsrv, void, avr, avr, avr)
@@ -710,3 +710,5 @@ VDIVSW 000100 ..... ..... ..... 00110001011 @VX
VDIVUW 000100 ..... ..... ..... 00010001011 @VX
VDIVSD 000100 ..... ..... ..... 00111001011 @VX
VDIVUD 000100 ..... ..... ..... 00011001011 @VX
+VDIVSQ 000100 ..... ..... ..... 00100001011 @VX
+VDIVUQ 000100 ..... ..... ..... 00000001011 @VX
@@ -1036,6 +1036,31 @@ void helper_XXPERMX(ppc_vsr_t *t, ppc_vsr_t *s0, ppc_vsr_t *s1, ppc_vsr_t *pcv,
*t = tmp;
}
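+/* VDIVSQ: Vector Divide Signed Quadword (new in Power ISA v3.1) */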
+void helper_VDIVSQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
+{
+    Int128 neg1 = int128_makes64(-1);
+    Int128 int128_min = int128_make128(0, INT64_MIN);
+
+    /* Divide by zero and INT128_MIN / -1 (which overflows) are both undefined */
+    if (int128_nz(b->s128) &&
+        (int128_ne(a->s128, int128_min) || int128_ne(b->s128, neg1))) {
+        t->s128 = int128_divs(a->s128, b->s128);
+    } else {
+        t->s128 = int128_zero(); /* Undefined behavior */
+    }
+}
+
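+/* VDIVUQ: Vector Divide Unsigned Quadword (new in Power ISA v3.1) */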
+void helper_VDIVUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
+{
+    if (int128_nz(b->s128)) {
+        t->s128 = int128_divu(a->s128, b->s128);
+    } else {
+        t->s128 = int128_zero(); /* Undefined behavior */
+    }
+}
+
void helper_VPERM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
ppc_avr_t result;
@@ -3284,6 +3284,8 @@ TRANS_VDIV_VMOD(ISA310, VDIVSW, MO_32, do_vx_div_i32 , NULL)
TRANS_VDIV_VMOD(ISA310, VDIVUW, MO_32, do_vx_divu_i32, NULL)
TRANS_VDIV_VMOD(ISA310, VDIVSD, MO_64, NULL, do_vx_div_i64)
TRANS_VDIV_VMOD(ISA310, VDIVUD, MO_64, NULL, do_vx_divu_i64)
+TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
+TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)
#undef GEN_VR_LDX
#undef GEN_VR_STX