#define op0_Rd .special = X86_SPECIAL_Op0_Rd,
#define op2_Ry .special = X86_SPECIAL_Op2_Ry,
#define avx_movx .special = X86_SPECIAL_AVXExtMov,
+#define sextT0 .special = X86_SPECIAL_SExtT0,
+#define zextT0 .special = X86_SPECIAL_ZExtT0,
#define vex1 .vex_class = 1,
#define vex1_rep3 .vex_class = 1, .vex_special = X86_VEX_REPScalar,
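The two new shorthands presuppose matching members in the X86InsnSpecial
enum of decode-new.h, next to the existing specials; a minimal sketch of
that hunk (the comment wording here is mine, not necessarily the patch's):

+    /* When loaded into s->T0, register operand 1 is sign/zero extended.  */
+    X86_SPECIAL_SExtT0,
+    X86_SPECIAL_ZExtT0,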
[5] = {
X86_OP_ENTRY3(BZHI, G,y, E,y, B,y, vex13 cpuid(BMI2)),
{},
- X86_OP_ENTRY3(PEXT, G,y, B,y, E,y, vex13 cpuid(BMI2)),
- X86_OP_ENTRY3(PDEP, G,y, B,y, E,y, vex13 cpuid(BMI2)),
+ X86_OP_ENTRY3(PEXT, G,y, B,y, E,y, vex13 zextT0 cpuid(BMI2)),
+ X86_OP_ENTRY3(PDEP, G,y, B,y, E,y, vex13 zextT0 cpuid(BMI2)),
{},
},
[6] = {
{},
},
[7] = {
- X86_OP_ENTRY3(BEXTR, G,y, E,y, B,y, vex13 cpuid(BMI1)),
+ X86_OP_ENTRY3(BEXTR, G,y, E,y, B,y, vex13 zextT0 cpuid(BMI1)),
X86_OP_ENTRY3(SHLX, G,y, E,y, B,y, vex13 cpuid(BMI2)),
- X86_OP_ENTRY3(SARX, G,y, E,y, B,y, vex13 cpuid(BMI2)),
- X86_OP_ENTRY3(SHRX, G,y, E,y, B,y, vex13 cpuid(BMI2)),
+ X86_OP_ENTRY3(SARX, G,y, E,y, B,y, vex13 sextT0 cpuid(BMI2)),
+ X86_OP_ENTRY3(SHRX, G,y, E,y, B,y, vex13 zextT0 cpuid(BMI2)),
{},
},
};
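PEXT and PDEP gain zextT0 because their helpers consume a full target_ulong:
with a 32-bit operand size, stale bits 32..63 of the vvvv register can leak
into the computation. A self-contained sketch of a pdep-style helper that
shows why; it mirrors what QEMU's int_helper.c plausibly does, but is not a
verbatim copy:

    #include "qemu/osdep.h"
    #include "qemu/host-utils.h"    /* ctz64() */

    target_ulong helper_pdep(target_ulong src, target_ulong mask)
    {
        target_ulong dest = 0;
        int i, o;

        /* Deposit successive low bits of src at the positions of the
         * set bits of mask, lowest first.  Stale high bits in src or
         * mask would change the result, hence zextT0 in the table. */
        for (i = 0; mask != 0; i++) {
            o = ctz64(mask);        /* position of the lowest set bit */
            mask &= mask - 1;       /* clear it */
            dest |= (target_ulong)((src >> i) & 1) << o;
        }
        return dest;
    }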
}
break;
+ case X86_SPECIAL_SExtT0:
+ case X86_SPECIAL_ZExtT0:
+ /* Handled in gen_load. */
+ assert(decode.op[1].unit == X86_OP_INT);
+ break;
+
default:
break;
}
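The assert records the contract: the new specials only apply when operand 1
is a general-purpose integer operand, because the decoder conventionally
loads operand 1 into s->T0 (and operand 2 into s->T1), and gen_load() below
keys the extension off v == s->T0. A sketch of that convention, not the
verbatim call sites:

    gen_load(s, &decode, 1, s->T0);   /* operand 1 -> T0, possibly extended */
    gen_load(s, &decode, 2, s->T1);   /* operand 2 -> T1, never extended */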
break;
case X86_OP_INT:
if (op->has_ea) {
- gen_op_ld_v(s, op->ot, v, s->A0);
+ if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) {
+ gen_op_ld_v(s, op->ot | MO_SIGN, v, s->A0);
+ } else {
+ gen_op_ld_v(s, op->ot, v, s->A0);
+ }
+
+ } else if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) {
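+ /* High-byte registers AH/CH/DH/BH live in bits 8..15 of regs[n - 4]. */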
+ if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) {
+ tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8);
+ } else {
+ tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8);
+ }
+
+ } else if (op->ot < MO_TL && v == s->T0 &&
+ (decode->e.special == X86_SPECIAL_SExtT0 ||
+ decode->e.special == X86_SPECIAL_ZExtT0)) {
+ if (decode->e.special == X86_SPECIAL_SExtT0) {
+ tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN);
+ } else {
+ tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot);
+ }
+
} else {
- gen_op_mov_v_reg(s, op->ot, v, op->n);
+ tcg_gen_mov_tl(v, cpu_regs[op->n]);
}
break;
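tcg_gen_ext_tl() picks the extension opcode from the MemOp, so one call
replaces the old per-width helpers; roughly, and assuming the usual tcg-op
naming:

    tcg_gen_ext_tl(v, reg, MO_8);             /* tcg_gen_ext8u_tl  */
    tcg_gen_ext_tl(v, reg, MO_8 | MO_SIGN);   /* tcg_gen_ext8s_tl  */
    tcg_gen_ext_tl(v, reg, MO_32);            /* tcg_gen_ext32u_tl */
    tcg_gen_ext_tl(v, reg, MO_32 | MO_SIGN);  /* tcg_gen_ext32s_tl */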
case X86_OP_IMM:
* Shifts larger than operand size get zeros.
*/
tcg_gen_ext8u_tl(s->A0, s->T1);
- if (TARGET_LONG_BITS == 64 && ot == MO_32) {
- tcg_gen_ext32u_tl(s->T0, s->T0);
- }
tcg_gen_shr_tl(s->T0, s->T0, s->A0);
tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero);
static void gen_PDEP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- MemOp ot = decode->op[1].ot;
- if (ot < MO_64) {
- tcg_gen_ext32u_tl(s->T0, s->T0);
- }
gen_helper_pdep(s->T0, s->T0, s->T1);
}
static void gen_PEXT(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- MemOp ot = decode->op[1].ot;
- if (ot < MO_64) {
- tcg_gen_ext32u_tl(s->T0, s->T0);
- }
gen_helper_pext(s->T0, s->T0, s->T1);
}
mask = ot == MO_64 ? 63 : 31;
tcg_gen_andi_tl(s->T1, s->T1, mask);
- if (ot != MO_64) {
- tcg_gen_ext32s_tl(s->T0, s->T0);
- }
tcg_gen_sar_tl(s->T0, s->T0, s->T1);
}
mask = ot == MO_64 ? 63 : 31;
tcg_gen_andi_tl(s->T1, s->T1, mask);
- if (ot != MO_64) {
- tcg_gen_ext32u_tl(s->T0, s->T0);
- }
tcg_gen_shr_tl(s->T0, s->T0, s->T1);
}
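The upshot for the shift emitters: the extension now happens once in
gen_load(), so the MO_32 case needs no special-casing here. A worked example
for SHRX with 32-bit operands on a 64-bit target (the values are mine,
chosen to show why the extension matters):

    /* SHRX eax, ebx, ecx with rbx = 0xffffffff80000000 (a stale 64-bit
     * value; its low 32 bits are the real source) and ecx = 4.
     *
     * zextT0 makes gen_load produce T0 = 0x0000000080000000, so
     *   T0 >> (4 & 31) = 0x0000000008000000  -> low 32 bits 0x08000000, correct.
     * Without it, T0 = 0xffffffff80000000 and
     *   T0 >> 4 = 0x0ffffffff8000000         -> low 32 bits 0xf8000000, wrong. */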