Also assert that we don't overflow either of two different offsets into
the TB: both unwind and goto_tb record a uint16_t for later use.
This fixes an arm-softmmu test case utilizing NEON in which a single
generated TB runs to 7800 opcodes and compiles to 96k on an x86_64
host. This overflows the 16-bit offset in which we record the
goto_tb reset offset. Because of that overflow, we install a jump
destination that goes to neverland. Boom.
With this reduced op count, the same TB compiles to about 48k for
aarch64, ppc64le, and x86_64 hosts, and neither assertion fires.
Cc: qemu-stable@nongnu.org
Reported-by: "Jason A. Donenfeld" <Jason@zx2c4.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
(cherry picked from commit 9f754620651d3432114f4bb89c7f12cbea814b3e)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
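
To see the failure mode in isolation, here is a minimal standalone C
sketch (not part of the patch; the 96k figure is taken from the report
above): an offset past 64k silently truncates when stored into a
uint16_t, which is exactly how the bogus goto_tb reset offset arose.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        size_t code_size = 96 * 1024;  /* the ~96k TB from the report */
        uint16_t stored = code_size;   /* silently truncates to 32768 */

        printf("stored offset: %u (actual %zu)\n", stored, code_size);

        /* The patch stores the offset and then asserts that the
         * round-trip is lossless; in this sketch the assert fires.  */
        assert(stored == code_size);
        return 0;
    }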
tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP);
}
tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
break;
case INDEX_op_goto_ptr:
tcg_out_movi32(s, COND_AL, base, ptr - dil);
}
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, args[0]);
}
break;
case INDEX_op_goto_ptr:
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
(intptr_t)(s->tb_jmp_target_addr + a0));
}
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
}
tcg_out_nop(s);
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
}
tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
tcg_out32(s, BCCTR | BO_ALWAYS);
- s->tb_jmp_reset_offset[args[0]] = c = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, args[0]);
if (USE_REG_TB) {
/* For the unlinked case, need to reset TCG_REG_TB. */
- c = -c;
+ c = -tcg_current_code_size(s);
assert(c == (int16_t)c);
tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
}
/* and go there */
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
}
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
/* For the unlinked path of goto_tb, we need to reset
TCG_REG_TB to the beginning of this TB. */
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
tcg_out_nop(s);
}
- s->tb_jmp_reset_offset[a0] = c = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, a0);
/* For the unlinked path of goto_tb, we need to reset
TCG_REG_TB to the beginning of this TB. */
if (USE_REG_TB) {
- c = -c;
+ c = -tcg_current_code_size(s);
if (check_fit_i32(c, 13)) {
tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
} else {
return l;
}
+static void set_jmp_reset_offset(TCGContext *s, int which)
+{
+ size_t off = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[which] = off;
+ /* Make sure that we didn't overflow the stored offset. */
+ assert(s->tb_jmp_reset_offset[which] == off);
+}
+
#include "tcg-target.inc.c"
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
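
Note the design choice in the hunks above: rather than open-coding the
uint16_t store in every backend, the new set_jmp_reset_offset() helper
centralizes both the store and its overflow check, so an oversized TB
now trips an assertion at translation time instead of corrupting a
jump target at run time.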
break;
case INDEX_op_insn_start:
if (num_insns >= 0) {
- s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
+ size_t off = tcg_current_code_size(s);
+ s->gen_insn_end_off[num_insns] = off;
+ /* Assert that we do not overflow our stored offset. */
+ assert(s->gen_insn_end_off[num_insns] == off);
}
num_insns++;
for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
/* This is not a hard limit, it merely stops translation when
* we have produced "enough" opcodes. We want to limit TB size
* such that a RISC host can reasonably use a 16-bit signed
- * branch within the TB.
+ * branch within the TB. We also need to be mindful of the
+ * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
+ * and TCGContext.gen_insn_end_off[].
*/
- return tcg_ctx->nb_ops >= 8000;
+ return tcg_ctx->nb_ops >= 4000;
}
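
Rough arithmetic behind the new limit, inferred from the numbers quoted
above rather than measured: ~7800 ops compiling to ~96k is about 12
bytes of host code per op, so 8000 ops can exceed the 65535-byte reach
of a uint16_t offset while 4000 ops stays near 48k.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* ~96k of host code for ~7800 ops, per the report above.  */
        unsigned bytes_per_op = 96 * 1024 / 7800;     /* about 12 */

        assert(8000u * bytes_per_op > UINT16_MAX);    /* old limit overflows */
        assert(4000u * bytes_per_op < UINT16_MAX);    /* new limit fits */
        return 0;
    }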
/* pool based memory allocation */
/* Indirect jump method. */
TODO();
}
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+ set_jmp_reset_offset(s, args[0]);
break;
case INDEX_op_br:
tci_out_label(s, arg_label(args[0]));