tcg: Zero extend 32-bit addresses for TCI

For native code generation, zero-extending 32-bit addresses for
the slow path helpers happens in tcg_out_{ld,st}_helper_args,
but there isn't really a slow path for TCI, so that didn't happen.

Make the extension for TCI explicit in the opcode stream,
much like we already do for plugins and atomic helpers.

Cc: qemu-stable@nongnu.org
Fixes: 24e46e6c9d ("accel/tcg: Widen tcg-ldst.h addresses to uint64_t")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Committed by: Richard Henderson, 2025-12-02 16:40:52 -08:00
parent 864814f71b
commit 41706d3e72

View file

@ -135,6 +135,30 @@ static void tcg_gen_req_mo(TCGBar type)
}
}
/*
 * For the TCG interpreter on a 64-bit host, guest addresses must be
 * presented as 64-bit values.  When the guest address type is 32-bit,
 * emit an explicit zero-extension into a fresh EBB temporary and
 * return that; otherwise hand back the original address unchanged.
 * This mirrors the extension done by tcg_out_{ld,st}_helper_args for
 * native backends, which have a slow path that TCI lacks.
 *
 * The caller is responsible for releasing any temporary created here
 * (see maybe_free_addr).
 */
static TCGTemp *tci_extend_addr(TCGTemp *addr)
{
#ifdef CONFIG_TCG_INTERPRETER
    if (TCG_TARGET_REG_BITS == 64 && tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 ext = tcg_temp_ebb_new_i64();

        tcg_gen_extu_i32_i64(ext, temp_tcgv_i32(addr));
        return tcgv_i64_temp(ext);
    }
#endif
    return addr;
}
/*
 * Release @copy if it is a temporary distinct from @addr, i.e. one
 * that tci_extend_addr allocated for the zero-extended address.
 * When no extension happened the two pointers are identical and
 * there is nothing to free.
 */
static void maybe_free_addr(TCGTemp *addr, TCGTemp *copy)
{
    if (addr == copy) {
        return;
    }
    tcg_temp_free_internal(copy);
}
/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
@ -234,6 +258,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
MemOp orig_memop;
MemOpIdx orig_oi, oi;
TCGv_i64 copy_addr;
TCGTemp *addr_new;
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
@ -248,10 +273,12 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}
addr_new = tci_extend_addr(addr);
copy_addr = plugin_maybe_preserve_addr(addr);
gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr_new, oi);
plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
QEMU_PLUGIN_MEM_R);
maybe_free_addr(addr, addr_new);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
@ -282,6 +309,7 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
{
TCGv_i32 swap = NULL;
MemOpIdx orig_oi, oi;
TCGTemp *addr_new;
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
@ -304,8 +332,10 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}
gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
addr_new = tci_extend_addr(addr);
gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr_new, oi);
plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
maybe_free_addr(addr, addr_new);
if (swap) {
tcg_temp_free_i32(swap);
@ -326,6 +356,7 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
MemOp orig_memop;
MemOpIdx orig_oi, oi;
TCGv_i64 copy_addr;
TCGTemp *addr_new;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
@ -350,10 +381,12 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}
addr_new = tci_extend_addr(addr);
copy_addr = plugin_maybe_preserve_addr(addr);
gen_ld_i64(val, addr, oi);
gen_ld_i64(val, addr_new, oi);
plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
QEMU_PLUGIN_MEM_R);
maybe_free_addr(addr, addr_new);
if ((orig_memop ^ memop) & MO_BSWAP) {
int flags = (orig_memop & MO_SIGN
@ -388,6 +421,7 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
{
TCGv_i64 swap = NULL;
MemOpIdx orig_oi, oi;
TCGTemp *addr_new;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
@ -418,8 +452,10 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}
gen_st_i64(val, addr, oi);
addr_new = tci_extend_addr(addr);
gen_st_i64(val, addr_new, oi);
plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
maybe_free_addr(addr, addr_new);
if (swap) {
tcg_temp_free_i64(swap);
@ -530,6 +566,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
{
MemOpIdx orig_oi;
TCGv_i64 ext_addr = NULL;
TCGTemp *addr_new;
check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
@ -557,8 +594,10 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}
addr_new = tci_extend_addr(addr);
gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I128, tcgv_i64_temp(lo),
tcgv_i64_temp(hi), addr, oi);
tcgv_i64_temp(hi), addr_new, oi);
maybe_free_addr(addr, addr_new);
if (need_bswap) {
tcg_gen_bswap64_i64(lo, lo);
@ -586,7 +625,9 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
y = TCGV128_LOW(val);
}
gen_ld_i64(x, addr, make_memop_idx(mop[0], idx));
addr_new = tci_extend_addr(addr);
gen_ld_i64(x, addr_new, make_memop_idx(mop[0], idx));
maybe_free_addr(addr, addr_new);
if (need_bswap) {
tcg_gen_bswap64_i64(x, x);
@ -602,7 +643,9 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
addr_p8 = tcgv_i64_temp(t);
}
gen_ld_i64(y, addr_p8, make_memop_idx(mop[1], idx));
addr_new = tci_extend_addr(addr_p8);
gen_ld_i64(y, addr_new, make_memop_idx(mop[1], idx));
maybe_free_addr(addr_p8, addr_new);
tcg_temp_free_internal(addr_p8);
if (need_bswap) {
@ -636,6 +679,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
{
MemOpIdx orig_oi;
TCGv_i64 ext_addr = NULL;
TCGTemp *addr_new;
check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
@ -666,8 +710,10 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}
addr_new = tci_extend_addr(addr);
gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I128,
tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr_new, oi);
maybe_free_addr(addr, addr_new);
if (need_bswap) {
tcg_temp_free_i64(lo);
@ -694,7 +740,9 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
x = b;
}
gen_st_i64(x, addr, make_memop_idx(mop[0], idx));
addr_new = tci_extend_addr(addr);
gen_st_i64(x, addr_new, make_memop_idx(mop[0], idx));
maybe_free_addr(addr, addr_new);
if (tcg_ctx->addr_type == TCG_TYPE_I32) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
@ -706,13 +754,15 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
addr_p8 = tcgv_i64_temp(t);
}
addr_new = tci_extend_addr(addr_p8);
if (b) {
tcg_gen_bswap64_i64(b, y);
gen_st_i64(b, addr_p8, make_memop_idx(mop[1], idx));
gen_st_i64(b, addr_new, make_memop_idx(mop[1], idx));
tcg_temp_free_i64(b);
} else {
gen_st_i64(y, addr_p8, make_memop_idx(mop[1], idx));
gen_st_i64(y, addr_new, make_memop_idx(mop[1], idx));
}
maybe_free_addr(addr_p8, addr_new);
tcg_temp_free_internal(addr_p8);
} else {
if (tcg_ctx->addr_type == TCG_TYPE_I32) {