Ensure direct jump patching in TCI is atomic by:
* naturally aligning the location of the direct jump address;
* using atomic_read()/atomic_set() to load/store the address.
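
For illustration only (not part of the patch), here is a stand-alone sketch of
the scheme.  The GCC/Clang __atomic_load_n()/__atomic_store_n() builtins stand
in for QEMU's atomic_read()/atomic_set(), and the hypothetical align_ptr_up_4()
stands in for QEMU_ALIGN_PTR_UP().  Because the 32-bit displacement is
naturally aligned, the patcher's store and the interpreter's load are single
accesses that cannot be torn.

    /*
     * Minimal sketch, not QEMU code: a 32-bit jump displacement at a
     * naturally aligned address is patched and read with single atomic
     * accesses, so a concurrent reader sees either the old or the new
     * value, never a mix.
     */
    #include <stdint.h>
    #include <stdio.h>

    /* analogous to QEMU_ALIGN_PTR_UP(p, 4): round p up to a 4-byte boundary */
    static inline uint8_t *align_ptr_up_4(uint8_t *p)
    {
        return (uint8_t *)(((uintptr_t)p + 3) & ~(uintptr_t)3);
    }

    /* writer side, analogous to what tb_set_jmp_target1() does for TCI */
    static void patch_jump(uintptr_t jmp_addr, uintptr_t addr)
    {
        int32_t disp = (int32_t)(addr - (jmp_addr + 4));
        __atomic_store_n((int32_t *)jmp_addr, disp, __ATOMIC_RELAXED);
    }

    /* reader side, analogous to the interpreter's INDEX_op_goto_tb handling */
    static int32_t read_jump(uint8_t **tb_ptr)
    {
        int32_t disp;

        *tb_ptr = align_ptr_up_4(*tb_ptr);      /* operand was emitted aligned */
        disp = __atomic_load_n((int32_t *)*tb_ptr, __ATOMIC_RELAXED);
        *tb_ptr += sizeof(int32_t);
        return disp;
    }

    int main(void)
    {
        /* fake "code buffer" holding one aligned 32-bit jump operand */
        _Alignas(4) uint8_t buf[8] = { 0 };
        uint8_t *p = buf;

        patch_jump((uintptr_t)buf, (uintptr_t)buf + 16);
        printf("displacement = %d\n", (int)read_jump(&p));  /* prints 12 */
        return 0;
    }

Relaxed ordering is sufficient for this sketch because the only requirement is
that the displacement is never observed half-written; ordering against other
memory is not needed for the patch location itself.
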
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Message-Id: <1461341333-19646-4-git-send-email-sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
 static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
 {
     /* patch the branch destination */
-    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
     /* no need to flush icache explicitly */
 }
 #elif defined(_ARCH_PPC)

         if (s->tb_jmp_offset) {
             /* Direct jump method. */
             tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
+            /* Align for atomic patching and thread safety */
+            s->code_ptr = QEMU_ALIGN_PTR_UP(s->code_ptr, 4);
             s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
             tcg_out32(s, 0);
         } else {

             goto exit;
             break;
         case INDEX_op_goto_tb:
-            t0 = tci_read_i32(&tb_ptr);
+            /* Jump address is aligned */
+            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
+            t0 = atomic_read((int32_t *)tb_ptr);
+            tb_ptr += sizeof(int32_t);
             tci_assert(tb_ptr == old_code_ptr + op_size);
             tb_ptr += (int32_t)t0;
             continue;