};
TRANS(UQRSHL_s, do_env_scalar2, a, &f_scalar_uqrshl)
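+/*
+ * Scalar integer compare on the low 64-bit element: the result is
+ * -1 if the condition holds, else 0.
+ */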
+static bool do_cmop_d(DisasContext *s, arg_rrr_e *a, TCGCond cond)
+{
+    if (fp_access_check(s)) {
+        TCGv_i64 t0 = read_fp_dreg(s, a->rn);
+        TCGv_i64 t1 = read_fp_dreg(s, a->rm);
+        tcg_gen_negsetcond_i64(cond, t0, t0, t1);
+        write_fp_dreg(s, a->rd, t0);
+    }
+    return true;
+}
+
+TRANS(CMGT_s, do_cmop_d, a, TCG_COND_GT)
+TRANS(CMHI_s, do_cmop_d, a, TCG_COND_GTU)
+TRANS(CMGE_s, do_cmop_d, a, TCG_COND_GE)
+TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU)
+TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ)
+TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE)
+
static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a,
                          gen_helper_gvec_3_ptr * const fns[3])
{
TRANS(ADD_v, do_gvec_fn3, a, tcg_gen_gvec_add)
TRANS(SUB_v, do_gvec_fn3, a, tcg_gen_gvec_sub)
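+/*
+ * Vector integer compare: each result element is -1 if the condition
+ * holds, else 0.  MO_64 elements are only valid in the Q=1 form.
+ */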
+static bool do_cmop_v(DisasContext *s, arg_qrrr_e *a, TCGCond cond)
+{
+    if (a->esz == MO_64 && !a->q) {
+        return false;
+    }
+    if (fp_access_check(s)) {
+        tcg_gen_gvec_cmp(cond, a->esz,
+                         vec_full_reg_offset(s, a->rd),
+                         vec_full_reg_offset(s, a->rn),
+                         vec_full_reg_offset(s, a->rm),
+                         a->q ? 16 : 8, vec_full_reg_size(s));
+    }
+    return true;
+}
+
+TRANS(CMGT_v, do_cmop_v, a, TCG_COND_GT)
+TRANS(CMHI_v, do_cmop_v, a, TCG_COND_GTU)
+TRANS(CMGE_v, do_cmop_v, a, TCG_COND_GE)
+TRANS(CMHS_v, do_cmop_v, a, TCG_COND_GEU)
+TRANS(CMEQ_v, do_cmop_v, a, TCG_COND_EQ)
+TRANS(CMTST_v, do_gvec_fn3, a, gen_gvec_cmtst)
+
/*
* Advanced SIMD scalar/vector x indexed element
*/
}
}
-static void handle_3same_64(DisasContext *s, int opcode, bool u,
-                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
-{
-    /* Handle 64x64->64 opcodes which are shared between the scalar
-     * and vector 3-same groups. We cover every opcode where size == 3
-     * is valid in either the three-reg-same (integer, not pairwise)
-     * or scalar-three-reg-same groups.
-     */
-    TCGCond cond;
-
-    switch (opcode) {
-    case 0x6: /* CMGT, CMHI */
-        cond = u ? TCG_COND_GTU : TCG_COND_GT;
-    do_cmop:
-        /* 64 bit integer comparison, result = test ? -1 : 0. */
-        tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
-        break;
-    case 0x7: /* CMGE, CMHS */
-        cond = u ? TCG_COND_GEU : TCG_COND_GE;
-        goto do_cmop;
-    case 0x11: /* CMTST, CMEQ */
-        if (u) {
-            cond = TCG_COND_EQ;
-            goto do_cmop;
-        }
-        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
-        break;
-    default:
-    case 0x1: /* SQADD / UQADD */
-    case 0x5: /* SQSUB / UQSUB */
-    case 0x8: /* SSHL, USHL */
-    case 0x9: /* SQSHL, UQSHL */
-    case 0xa: /* SRSHL, URSHL */
-    case 0xb: /* SQRSHL, UQRSHL */
-    case 0x10: /* ADD, SUB */
-        g_assert_not_reached();
-    }
-}
-
/* AdvSIMD scalar three same
* 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
* +-----+---+-----------+------+---+------+--------+---+------+------+
    TCGv_i64 tcg_rd;
    switch (opcode) {
-    case 0x6: /* CMGT, CMHI */
-    case 0x7: /* CMGE, CMHS */
-    case 0x11: /* CMTST, CMEQ */
-        if (size != 3) {
-            unallocated_encoding(s);
-            return;
-        }
-        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
    default:
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
+    case 0x6: /* CMGT, CMHI */
+    case 0x7: /* CMGE, CMHS */
    case 0x8: /* SSHL, USHL */
    case 0x9: /* SQSHL, UQSHL */
    case 0xa: /* SRSHL, URSHL */
    case 0xb: /* SQRSHL, UQRSHL */
    case 0x10: /* ADD, SUB (vector) */
+    case 0x11: /* CMTST, CMEQ */
        unallocated_encoding(s);
        return;
    }
    tcg_rd = tcg_temp_new_i64();
    if (size == 3) {
-        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
-        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
-
-        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
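+        /* No remaining opcode in this group is valid for size == 3. */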
+        g_assert_not_reached();
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
-    TCGCond cond;
    switch (opcode) {
    case 0x13: /* MUL, PMUL */
    case 0x01: /* SQADD, UQADD */
    case 0x05: /* SQSUB, UQSUB */
+    case 0x06: /* CMGT, CMHI */
+    case 0x07: /* CMGE, CMHS */
    case 0x08: /* SSHL, USHL */
    case 0x09: /* SQSHL, UQSHL */
    case 0x0a: /* SRSHL, URSHL */
    case 0x0b: /* SQRSHL, UQRSHL */
    case 0x10: /* ADD, SUB */
+    case 0x11: /* CMTST, CMEQ */
        unallocated_encoding(s);
        return;
    }
            gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
        }
        return;
-    case 0x11:
-        if (!u) { /* CMTST */
-            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
-            return;
-        }
-        /* else CMEQ */
-        cond = TCG_COND_EQ;
-        goto do_gvec_cmp;
-    case 0x06: /* CMGT, CMHI */
-        cond = u ? TCG_COND_GTU : TCG_COND_GT;
-        goto do_gvec_cmp;
-    case 0x07: /* CMGE, CMHS */
-        cond = u ? TCG_COND_GEU : TCG_COND_GE;
-    do_gvec_cmp:
-        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
-                         vec_full_reg_offset(s, rn),
-                         vec_full_reg_offset(s, rm),
-                         is_q ? 16 : 8, vec_full_reg_size(s));
-        return;
    }
    if (size == 3) {
-        assert(is_q);
-        for (pass = 0; pass < 2; pass++) {
-            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
-            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
-            TCGv_i64 tcg_res = tcg_temp_new_i64();
-
-            read_vec_element(s, tcg_op1, rn, pass, MO_64);
-            read_vec_element(s, tcg_op2, rm, pass, MO_64);
-
-            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
-
-            write_vec_element(s, tcg_res, rd, pass, MO_64);
-        }
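+        /* None of the remaining opcodes allow size == 3 here. */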
+        g_assert_not_reached();
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();