tcg_gen_xor_i64(t1, result, r1);
tcg_gen_xor_i64(t0, r1, r2);
tcg_gen_andc_i64(t1, t1, t0);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
- tcg_gen_trunc_shr_i64_i32(temp, result, 32);
+ tcg_gen_extrh_i64_i32(temp, result);
tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
tcg_gen_xor_i64(t3, t4, t1);
tcg_gen_xor_i64(t2, t1, t2);
tcg_gen_andc_i64(t3, t3, t2);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
/* We produce an overflow on the host if the mul before was
   ((0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
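/* A worked instance of that corner case, for reference (arithmetic only):
   as signed 32-bit operands, 0x80000000 * 0x80000000 = 0x4000000000000000,
   and shifting that left by one gives 0x8000000000000000, i.e. INT64_MIN.
   The doubled product therefore wraps the signed 64-bit host value for
   exactly this operand pair, which is why the ovf computed from the sign
   bits above is negated for it. */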
tcg_gen_xor_i64(t1, result, r1);
tcg_gen_xor_i64(t0, r1, r2);
tcg_gen_and_i64(t1, t1, t0);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
- tcg_gen_trunc_shr_i64_i32(temp, result, 32);
+ tcg_gen_extrh_i64_i32(temp, result);
tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
tcg_gen_xor_i64(t3, t4, t1);
tcg_gen_xor_i64(t2, t1, t2);
tcg_gen_and_i64(t3, t3, t2);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
/* We produce an overflow on the host if the mul before was
   ((0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00)
-* trunc_shr_i64_i32 t0, t1, pos
+* extrl_i64_i32 t0, t1
-For 64-bit hosts only, right shift the 64-bit input T1 by POS and
-truncate to 32-bit output T0. Depending on the host, this may be
-a simple mov/shift, or may require additional canonicalization.
+For 64-bit hosts only, extract the low 32 bits of input T1 and place them
+in 32-bit output T0. Depending on the host, this may be a simple move,
+or may require additional canonicalization.
+
+* extrh_i64_i32 t0, t1
+
+For 64-bit hosts only, extract the high 32 bits of input T1 and place them
+in 32-bit output T0. Depending on the host, this may be a simple shift,
+or may require additional canonicalization.
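
As a quick illustration of how a front end uses the pair (a minimal sketch;
the helper name gen_split64 is invented for this example, the tcg_gen_*
calls are the ones introduced by this patch):

    /* Split a 64-bit value into its 32-bit halves.  On a 32-bit host these
       expand to plain moves of TCGV_LOW/TCGV_HIGH; on a 64-bit host they
       emit extrl_i64_i32/extrh_i64_i32 or the mov/shift fallback. */
    static void gen_split64(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 val)
    {
        tcg_gen_extrl_i64_i32(lo, val);   /* low 32 bits of val  */
        tcg_gen_extrh_i64_i32(hi, val);   /* high 32 bits of val */
    }

This mirrors what tcg_gen_extr_i64_i32() does in tcg-op.c further below.
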
********* Conditional moves
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_mulsh_i32 0
#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div2_i64 1
#define TCG_TARGET_HAS_rot_i64 1
#define TCG_TARGET_HAS_ext8s_i64 1
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_mulsh_i64 0
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
case INDEX_op_shr_i32:
return (uint32_t)x >> (y & 31);
- case INDEX_op_trunc_shr_i64_i32:
case INDEX_op_shr_i64:
return (uint64_t)x >> (y & 63);
return (int32_t)x;
case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
case INDEX_op_ext32u_i64:
return (uint32_t)x;
+ case INDEX_op_extrh_i64_i32:
+ return (uint64_t)x >> 32;
+
case INDEX_op_muluh_i32:
return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
case INDEX_op_mulsh_i32:
}
break;
- case INDEX_op_trunc_shr_i64_i32:
- mask = (uint64_t)temps[args[1]].mask >> args[2];
+ case INDEX_op_extrl_i64_i32:
+ mask = (uint32_t)temps[args[1]].mask;
+ break;
+ case INDEX_op_extrh_i64_i32:
+ mask = (uint64_t)temps[args[1]].mask >> 32;
break;
CASE_OP_32_64(shl):
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extrh_i64_i32:
if (temp_is_const(args[1])) {
tmp = do_constant_folding(opc, temps[args[1]].val, 0);
tcg_opt_gen_movi(s, op, args, args[0], tmp);
}
goto do_default;
- case INDEX_op_trunc_shr_i64_i32:
- if (temp_is_const(args[1])) {
- tmp = do_constant_folding(opc, temps[args[1]].val, args[2]);
- tcg_opt_gen_movi(s, op, args, args[0], tmp);
- break;
- }
- goto do_default;
-
CASE_OP_32_64(add):
CASE_OP_32_64(sub):
CASE_OP_32_64(mul):
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_rot_i64 1
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div2_i64 1
#define TCG_TARGET_HAS_rot_i64 1
case INDEX_op_ext32u_i64:
tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
break;
- case INDEX_op_trunc_shr_i64_i32:
- if (a2 == 0) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- } else {
- tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX);
- }
+ case INDEX_op_extrl_i64_i32:
+ tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+ break;
+ case INDEX_op_extrh_i64_i32:
+ tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
break;
case INDEX_op_brcond_i64:
{ INDEX_op_ext32u_i64, { "R", "R" } },
{ INDEX_op_ext_i32_i64, { "R", "r" } },
{ INDEX_op_extu_i32_i64, { "R", "r" } },
- { INDEX_op_trunc_shr_i64_i32, { "r", "R" } },
+ { INDEX_op_extrl_i64_i32, { "r", "R" } },
+ { INDEX_op_extrh_i64_i32, { "r", "R" } },
{ INDEX_op_brcond_i64, { "RZ", "RJ" } },
{ INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 1
+#define TCG_TARGET_HAS_extrl_i64_i32 1
+#define TCG_TARGET_HAS_extrh_i64_i32 1
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_rot_i64 0
/* Size changing operations. */
-void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg, unsigned count)
+void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
- tcg_debug_assert(count < 64);
if (TCG_TARGET_REG_BITS == 32) {
- if (count >= 32) {
- tcg_gen_shri_i32(ret, TCGV_HIGH(arg), count - 32);
- } else if (count == 0) {
- tcg_gen_mov_i32(ret, TCGV_LOW(arg));
- } else {
- TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_shri_i64(t, arg, count);
- tcg_gen_mov_i32(ret, TCGV_LOW(t));
- tcg_temp_free_i64(t);
- }
- } else if (TCG_TARGET_HAS_trunc_shr_i64_i32) {
- tcg_gen_op3(&tcg_ctx, INDEX_op_trunc_shr_i64_i32,
- GET_TCGV_I32(ret), GET_TCGV_I64(arg), count);
- } else if (count == 0) {
+ tcg_gen_mov_i32(ret, TCGV_LOW(arg));
+ } else if (TCG_TARGET_HAS_extrl_i64_i32) {
+ tcg_gen_op2(&tcg_ctx, INDEX_op_extrl_i64_i32,
+ GET_TCGV_I32(ret), GET_TCGV_I64(arg));
+ } else {
tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
+ }
+}
+
+void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
+ } else if (TCG_TARGET_HAS_extrh_i64_i32) {
+ tcg_gen_op2(&tcg_ctx, INDEX_op_extrh_i64_i32,
+ GET_TCGV_I32(ret), GET_TCGV_I64(arg));
} else {
TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_shri_i64(t, arg, count);
+ tcg_gen_shri_i64(t, arg, 32);
tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
tcg_temp_free_i64(t);
}
tcg_gen_mov_i32(lo, TCGV_LOW(arg));
tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
} else {
- tcg_gen_trunc_shr_i64_i32(lo, arg, 0);
- tcg_gen_trunc_shr_i64_i32(hi, arg, 32);
+ tcg_gen_extrl_i64_i32(lo, arg);
+ tcg_gen_extrh_i64_i32(hi, arg);
}
}
void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
-void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg, unsigned int c);
+void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
+void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
- tcg_gen_trunc_shr_i64_i32(ret, arg, 0);
+ tcg_gen_extrl_i64_i32(ret, arg);
}
/* QEMU specific operations. */
/* size changing ops */
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
DEF(extu_i32_i64, 1, 1, 0, IMPL64)
-DEF(trunc_shr_i64_i32, 1, 1, 1,
- IMPL(TCG_TARGET_HAS_trunc_shr_i64_i32)
+DEF(extrl_i64_i32, 1, 1, 0,
+ IMPL(TCG_TARGET_HAS_extrl_i64_i32)
+ | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
+DEF(extrh_i64_i32, 1, 1, 0,
+ IMPL(TCG_TARGET_HAS_extrh_i64_i32)
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64)
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_div2_i64 0
#define TCG_TARGET_HAS_mulsh_i32 0
#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_trunc_shr_i64_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1