1738 lines
50 KiB
C
1738 lines
50 KiB
C
#include "qemu/osdep.h"
|
|
#include "exec/memop.h"
|
|
#include "hw/core/cpu.h"
|
|
#include "qemu/compiler.h"
|
|
#include "cpu.h"
|
|
#include "exec/translator.h"
|
|
#include "accel/tcg/cpu-ldst.h"
|
|
#include "qemu/typedefs.h"
|
|
#include "qemu/qemu-print.h"
|
|
#include "tcg/tcg-cond.h"
|
|
#include "tcg/tcg-op-common.h"
|
|
#include "tcg/tcg-op.h"
|
|
#include "exec/helper-gen.h"
|
|
#include "exec/helper-proto.h"
|
|
#include "tcg/tcg.h"
|
|
|
|
#define HELPER_H "helper.h"
|
|
#include "exec/helper-info.c.inc"
|
|
#undef HELPER_H
|
|
|
|
/* Representation of the condition encoding (cond field of Bcc/SCOND/Jcc).
 * Signed relations (GT/LE/LT/GE) test the PSR N flag; unsigned relations
 * (HI/LS/LO/HS) test the PSR L flag — see trans_SCOND. */
enum {
    CR16C_COND_EQ,
    CR16C_COND_NE,
    CR16C_COND_CS,
    CR16C_COND_CC,
    CR16C_COND_HI,
    CR16C_COND_LS,
    CR16C_COND_GT,
    CR16C_COND_LE,
    CR16C_COND_FS,
    CR16C_COND_FC,
    CR16C_COND_LO,
    CR16C_COND_HS,
    CR16C_COND_LT,
    CR16C_COND_GE,
    CR16C_COND_ALWAYS,
    CR16C_COND_UC,
};
|
|
|
|
/* Representation of register pair encoding: value n names the pair (Rn+1, Rn). */
enum {
    CR16C_RP_R1R0,
    CR16C_RP_R2R1,
    CR16C_RP_R3R2,
    CR16C_RP_R4R3,
    CR16C_RP_R5R4,
    CR16C_RP_R6R5,
    CR16C_RP_R7R6,
    CR16C_RP_R8R7,
    CR16C_RP_R9R8,
    CR16C_RP_R10R9,
    CR16C_RP_R11R10,
    CR16C_RP_R12LR11,
    // TODO these are ~funky, depend on CFG.SR and PSR.U
    /*CR16C_RP_R12,
    CR16C_RP_R13,
    CR16C_RP_RA,
    CR16C_RP_SP,*/
};
|
|
|
|
/* Per-translation-block decode state; base must stay the first member so
 * container_of() in the translator hooks can recover it from DisasContextBase. */
typedef struct DisasContext {
    DisasContextBase base;
    CPUCR16CState* env;   // CPU state used for instruction-stream fetches during decode
} DisasContext;
|
|
|
|
|
|
/* Registers — TCG globals backing the guest register state */

static TCGv pc; // 24 bit program counter

// Not yet modeled here:
// ISPH/L (Interrupt Stack Pointer)
// USPH/L (User Stack Pointer)
// INTBASEH/L (Interrupt Base)

// PSR, 16 bit — modeled as one TCGv per flag.
// NOTE(review): flag values are not always canonical 0/1 (e.g. psr_c/psr_f
// can hold shifted multi-bit results); consumers mask with "& 1" as needed.
static TCGv psr_n;
static TCGv psr_z;
static TCGv psr_f;
static TCGv psr_l;
static TCGv psr_c;
static TCGv psr_t;
static TCGv psr_u;
static TCGv psr_e;
static TCGv psr_p;
static TCGv psr_i;

// CFG register, one TCGv per bit
// Most notably: SR bit "Short Register"
// => switches to register pairings for reassembled code originally written for CR16B
static TCGv cfg_dc;
static TCGv cfg_ldc;
static TCGv cfg_ic;
static TCGv cfg_lic;
static TCGv cfg_ed;
static TCGv cfg_sr;

// debug registers (optional) "depends on the configuration of the chip"

/* General purpose registers, incl. RA and SP.
 * NOTE(review): registers below CR16C_FIRST_32B_REG are architecturally
 * 16 bit; code throughout treats their upper 16 TCG bits as don't-care. */
static TCGv r[CR16C_REG_COUNT];
|
|
|
|
|
|
/* Translator hook: stash the CPU state pointer for use while decoding. */
static void cr16c_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) {
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->env = cpu_env(cs);
}
|
|
|
|
/* Translator hook: nothing to do at the start of a translation block. */
static void cr16c_tr_tb_start(DisasContextBase *base, CPUState *cs) { }
|
|
|
|
/* Translator hook: record the guest PC of the instruction about to be decoded. */
static void cr16c_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) {
    tcg_gen_insn_start(dcbase->pc_next);
}
|
|
|
|
/*
 * Fetch additional 16-bit instruction words until n bytes of the insn are
 * buffered; i is the number of bytes already loaded. Words are placed
 * big-end-first inside the 64-bit buffer (the first fetched word lands in
 * bits 63..48), advancing pc_next past each word. If i cannot land exactly
 * on n (mismatched parity), an illegal-instruction exception is raised.
 */
static uint64_t decode_load_bytes(DisasContext *ctx, uint64_t insn, int i, int n) {
    for(; i < n; i+=2) {
        insn |= (uint64_t)translator_lduw(ctx->env, &ctx->base, ctx->base.pc_next) << (48 - i * 8);
        ctx->base.pc_next += 2;
    }
    if (i == n)
        return insn;
    else {
        gen_helper_raise_illegal_instruction(tcg_env);
        return 0;
    }
}
|
|
|
|
|
|
/*** Instruction translations ***/
|
|
|
|
/*
 * Decode a 4-bit "short immediate": the encoding 0x9 stands for -1, and
 * 0xB selects a trailing 16-bit immediate word (fetched here, advancing
 * pc_next); every other value is used as-is.
 * NOTE(review): the trailing word is fetched with cpu_ldsw_le_data while
 * other code-stream fetches use cpu_lduw_code/translator_lduw — confirm
 * a data-load is intended here.
 */
static int16_t u4_load_s16(DisasContext *ctx, int val) {
    if (val == 0x9) {
        return -1;
    }
    else if (val == 0xB) {
        int16_t val_ld = cpu_ldsw_le_data(ctx->env, ctx->base.pc_next);
        ctx->base.pc_next += 2;
        return val_ld;
    }
    return val;
}
|
|
|
|
/* Fetch the next 16-bit immediate word from the instruction stream,
 * advancing pc_next past it. */
static uint16_t load_u16(DisasContext *ctx) {
    uint16_t word = cpu_lduw_code(ctx->env, ctx->base.pc_next);

    ctx->base.pc_next += 2;
    return word;
}
|
|
|
|
/* A 4-bit displacement field encodes half the byte offset; scale it up.
 * ctx is unused but kept for signature uniformity with other decoders. */
static uint8_t get_disp4(DisasContext *ctx, uint8_t disp) {
    return (uint8_t)(disp * 2);
}
|
|
|
|
/*
 * Resolve the branch destination for an 8-bit displacement field.
 *
 * The displacement is relative to the instruction start (pc_next was
 * already advanced past the 2-byte opcode, hence the -2). The escape
 * value 0x80 (sign-extended to 0xFFFFFF80) selects a trailing 16-bit
 * displacement word instead; otherwise the field encodes half the byte
 * offset.
 *
 * Fix: removed the stray ';' after the closing brace (an empty
 * top-level declaration, rejected under -pedantic).
 */
static int32_t disp8_get_dest(DisasContext* ctx, int32_t disp) {
    int32_t dest = ctx->base.pc_next - 2;
    if (disp == 0xFFFFFF80) {
        /* NOTE(review): other code-stream fetches use cpu_lduw_code /
         * translator_lduw — confirm cpu_ldsw_le_data is intended here. */
        dest += cpu_ldsw_le_data(ctx->env, ctx->base.pc_next);
        ctx->base.pc_next += 2;
    }
    else {
        dest += disp << 1;
    }
    return dest;
}
|
|
|
|
/* Map a 20-bit absolute address into the 24-bit space: addresses above
 * 0xEFFFF are relocated to the top of the address space by setting the
 * four high nibble bits. */
static uint32_t reloc_abs20(DisasContext *ctx, uint32_t addr) {
    return (addr > 0xEFFFF) ? (addr | 0xF00000) : addr;
}
|
|
|
|
/*
 * Emit a jump to dest. When direct TB chaining is permitted, emit the
 * goto_tb / set-pc / exit_tb sequence for the given chain slot; otherwise
 * set pc and fall back to the indirect TB lookup helper.
 */
static void gen_goto(DisasContextBase *dcbase, vaddr dest, uint8_t slot) {
    if(translator_use_goto_tb(dcbase, dest)) {
        /* goto_tb must precede the pc update per the TCG chaining protocol */
        tcg_gen_goto_tb(slot);
        tcg_gen_movi_i32(pc, dest);
        tcg_gen_exit_tb(dcbase->tb, slot);
    }
    else {
        tcg_gen_movi_i32(pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
}
|
|
|
|
/* Map an operand width in bytes (1/2/4) to an unsigned-load MemOp; indices
 * 0 and 3 are unused. NOTE(review): no endianness bits are OR'd in —
 * confirm whether MO_LE is needed for guest loads here. */
static MemOp unsigned_op_by_width[] = {0, MO_UB, MO_UW, 0, MO_UL};
|
|
|
|
/*
 * Compute the effective address "disp(Ra)" into dest.
 * With dbase set, a 16-bit Ra is widened to the 32-bit pair (Ra+1:Ra);
 * without dbase only the low 16 bits of Ra are used as the base.
 */
static void gen_compute_addr_disp(TCGv_i32 dest, int ra_id, int disp, bool dbase) {
    if (dbase && ra_id < CR16C_FIRST_32B_REG) {
        /* 32-bit base from a 16-bit register pair */
        tcg_gen_deposit_i32(dest, r[ra_id], r[ra_id+1], 16, 16);
    }
    else if (!dbase) {
        /* 16-bit base: mask the don't-care upper half */
        tcg_gen_andi_i32(dest, r[ra_id], 0xFFFF);
    }
    else {
        tcg_gen_mov_i32(dest, r[ra_id]);
    }
    tcg_gen_addi_i32(dest, dest, disp);
}
|
|
|
|
|
|
/*
 * Compute an index-register-pair effective address into dest:
 * index register (r12 or r13, selected by the high bit of rrp) plus the
 * register pair named by the low three bits, plus disp. The pair's low
 * register is combined in place with its high half first.
 * NOTE(review): writing into r[addr_rp_num] relies on the upper 16 bits
 * of 16-bit registers being don't-care — confirm this holds for all uses.
 */
static void gen_compute_rrp_addr(TCGv_i32 dest, uint8_t rrp, uint32_t disp) {
    TCGv_i32 index_reg = r[12 + rrp/8];
    uint8_t rp_id = rrp % 8;
    /* pair ids 6..7 skip over the 32-bit registers (r12/r13) */
    uint8_t addr_rp_num = rp_id < 6 ? (rp_id * 2) : (3 + rp_id * 2);

    tcg_gen_deposit_i32(r[addr_rp_num], r[addr_rp_num], r[addr_rp_num + 1], 16, 16);
    tcg_gen_add_i32(dest, index_reg, r[addr_rp_num]);
    tcg_gen_addi_i32(dest, dest, disp);
}
|
|
|
|
|
|
|
|
// Include generated decodetree function declarations
|
|
#include "decode-insn.c.inc"
|
|
|
|
/* Moves */
|
|
|
|
/*
 * MOVi imm, rd. For byte/word widths the immediate replaces the low
 * width*8 bits of rd (upper bits preserved); width 4 writes a pair:
 * low word into rd, high word into rd+1.
 * NOTE(review): with width == 4 and rd >= CR16C_FIRST_32B_REG only the
 * low 16 bits would be written — confirm the encoding cannot name a
 * 32-bit register for a dword immediate move.
 */
static bool trans_MOV_imm(DisasContext* ctx, arg_MOV_imm* a) {
    int len = (a->width == 4 ? 2 : a->width) * 8;
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], tcg_constant_i32(a->imm), 0, len);
    if (a->width == 4 && a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_movi_i32(r[a->rd + 1], a->imm >> 16);
    }
    return true;
}
|
|
|
|
/* MOVi rs, rd: copy the low width*8 bits of rs into rd, preserving the
 * destination's bits above the operand width. */
static bool trans_MOV_reg(DisasContext* ctx, arg_MOV_reg* a) {
    int bits = a->width * 8;

    tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rs], 0, bits);
    return true;
}
|
|
|
|
/*
 * MOVD rs, rd: dword move between register pairs and/or 32-bit registers.
 * Handles all four combinations of pair (16-bit, below CR16C_FIRST_32B_REG)
 * and single 32-bit register operands.
 */
static bool trans_MOVD_reg(DisasContext* ctx, arg_MOVD_reg* a) {
    if (a->rs < CR16C_FIRST_32B_REG && a->rd < CR16C_FIRST_32B_REG) {
        /* pair -> pair: copy both halves */
        tcg_gen_mov_i32(r[a->rd], r[a->rs]);
        tcg_gen_mov_i32(r[a->rd+1], r[a->rs+1]);
    }
    else if (a->rs >= CR16C_FIRST_32B_REG && a->rd >= CR16C_FIRST_32B_REG) {
        /* 32-bit -> 32-bit */
        tcg_gen_mov_i32(r[a->rd], r[a->rs]);
    }
    else if (a->rs < CR16C_FIRST_32B_REG) {
        /* pair -> 32-bit: compose the two 16-bit halves */
        tcg_gen_deposit_i32(r[a->rd], r[a->rs], r[a->rs+1], 16, 16);
    }
    else { /* <=> a->rd < CR16C_FIRST_32B_REG */
        /* 32-bit -> pair: low word to rd, high word to rd+1 */
        tcg_gen_mov_i32(r[a->rd], r[a->rs]);
        tcg_gen_shri_i32(r[a->rd+1], r[a->rs], 16);
    }
    return true;
}
|
|
|
|
/* MOVXB: sign-extend the low byte of rs into the low word of rd;
 * bits of rd above the word are left untouched. */
static bool trans_MOVXB(DisasContext* ctx, arg_MOVXB* a) {
    TCGv_i32 ext = tcg_temp_new_i32();

    tcg_gen_ext8s_i32(ext, r[a->rs]);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], ext, 0, 16);
    return true;
}
|
|
|
|
/* MOVXW: sign-extend the word in rs to a dword in rd; for a register pair
 * the high half is mirrored into rd+1. */
static bool trans_MOVXW(DisasContext* ctx, arg_MOVXW* a) {
    tcg_gen_ext16s_i32(r[a->rd], r[a->rs]);
    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
    }
    return true;
}
|
|
|
|
/* MOVZB: zero-extend the low byte of rs into the low word of rd;
 * bits of rd above the word are left untouched. */
static bool trans_MOVZB(DisasContext* ctx, arg_MOVZB* a) {
    TCGv_i32 ext = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(ext, r[a->rs]);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], ext, 0, 16);
    return true;
}
|
|
|
|
/* MOVZW: zero-extend the word in rs to a dword in rd; for a register pair
 * the high half (rd+1) is cleared. */
static bool trans_MOVZW(DisasContext* ctx, arg_MOVZW* a) {
    tcg_gen_andi_i32(r[a->rd], r[a->rs], 0xFFFF);
    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_movi_i32(r[a->rd+1], 0);
    }
    return true;
}
|
|
|
|
|
|
/* Integer Arithmetic */
|
|
|
|
/*
 * Add an immediate (optionally plus PSR.C) into the low width*8 bits of rd,
 * preserving the bits above the operand width. Sets PSR.C to the carry out
 * of the operand width and PSR.F to signed overflow.
 * NOTE(review): assumes width is 1 or 2 — width == 4 would shift a 32-bit
 * int by 32 in the mask computation (undefined behavior in C).
 */
static void gen_ADD_imm(TCGv_i32 rd, int32_t imm, bool add_carry, uint8_t width) {
    TCGv temp = tcg_temp_new();

    /* Zero bits above the width so the carry lands exactly at bit width*8 */
    tcg_gen_andi_i32(temp, rd, (1 << width * 8) - 1);
    tcg_gen_addi_i32(temp, temp, imm & ((1 << width * 8) - 1));

    if(add_carry) {
        /* psr_c may hold multi-bit junk; use only its low bit */
        tcg_gen_andi_i32(psr_c, psr_c, 1);
        tcg_gen_add_i32(temp, temp, psr_c);
    }

    /* Carry flag: bit width*8 of the widened sum */
    tcg_gen_shri_i32(psr_c, temp, width * 8);

    /* Overflow flag: result sign differs from rd's sign in the direction
     * implied by the immediate's sign */
    tcg_gen_xor_i32(psr_f, temp, rd);
    if(imm < 0) {
        tcg_gen_and_i32(psr_f, psr_f, rd);
    }
    else {
        tcg_gen_andc_i32(psr_f, psr_f, rd);
    }
    tcg_gen_shri_i32(psr_f, psr_f, width*8 - 1);

    /* Write back only the operand-width portion */
    tcg_gen_deposit_i32(rd, rd, temp, 0, width*8);
}
|
|
|
|
|
|
/*
 * Add reg2 (optionally plus PSR.C) into the low width*8 bits of reg1,
 * preserving bits above the width. Sets PSR.C to the carry out of the
 * operand width and PSR.F to signed overflow.
 * NOTE(review): assumes width is 1 or 2 — width == 4 would shift a 32-bit
 * int by 32 in the mask computation (undefined behavior in C).
 */
static void gen_ADD_reg(TCGv reg1, TCGv reg2, bool plus_carry, uint8_t width) {
    TCGv temp1 = tcg_temp_new_i32();
    TCGv temp2 = tcg_temp_new_i32();
    TCGv tempf_f = tcg_temp_new_i32();

    /* Overflow precondition: operands have the same sign (captured before
     * reg1 is overwritten below) */
    tcg_gen_eqv_i32(tempf_f, reg1, reg2);

    /* Clear potential junk in bits above the operand width */
    tcg_gen_andi_i32(temp1, reg1, (1 << width * 8) - 1);
    tcg_gen_andi_i32(temp2, reg2, (1 << width * 8) - 1);

    tcg_gen_add_i32(temp1, temp1, temp2);

    if(plus_carry) {
        /* psr_c may hold multi-bit junk; use only its low bit */
        tcg_gen_andi_i32(psr_c, psr_c, 1);
        tcg_gen_add_i32(temp1, temp1, psr_c);
    }

    tcg_gen_deposit_i32(reg1, reg1, temp1, 0, width*8);

    /* Carry flag: bit width*8 of the widened sum */
    tcg_gen_shri_i32(psr_c, temp1, width*8);

    /* Overflow flag: same-signed operands whose result sign flipped */
    tcg_gen_xor_i32(psr_f, reg1, reg2);
    tcg_gen_and_i32(psr_f, psr_f, tempf_f);
    tcg_gen_shri_i32(psr_f, psr_f, width*8-1);
}
|
|
|
|
/*
 * Dword add-immediate into the pair (or 32-bit register) starting at regnl.
 * The 32-bit operand is widened to 64 bits so PSR.C can be extracted from
 * bit 32; PSR.F is set to signed overflow.
 */
static void gen_ADDD_imm(int regnl, int32_t imm) {
    TCGv_i64 temp_res = tcg_temp_new_i64();
    TCGv_i32 temp_h = tcg_temp_new_i32();

    TCGv_i32 regl = r[regnl];
    TCGv_i32 regh = r[regnl+1];

    /* Move param registers into 64 bit temporary (composing 16-bit pairs) */
    if (regnl < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(temp_h, regl, regh, 16, 16);
    }
    else {
        tcg_gen_mov_i32(temp_h, regl);
    }
    tcg_gen_extu_i32_i64(temp_res, temp_h);

    /* Add the immediate as an unsigned 32-bit value */
    tcg_gen_addi_i64(temp_res, temp_res, imm & 0xFFFFFFFF);
    tcg_gen_extrl_i64_i32(regl, temp_res);

    /* Carry flag: bit 32 of the widened sum */
    tcg_gen_extrh_i64_i32(psr_c, temp_res);

    /* Overflow flag: temp_h still holds the pre-add operand, regl the result */
    if (imm < 0) {
        tcg_gen_andc_i32(psr_f, temp_h, regl);
    }
    else {
        tcg_gen_andc_i32(psr_f, regl, temp_h);
    }
    tcg_gen_shri_i32(psr_f, psr_f, 31);

    /* Mirror the high half of a 16-bit register pair */
    if (regnl < CR16C_FIRST_32B_REG) {
        tcg_gen_shri_i32(regh, regl, 16);
    }
}
|
|
|
|
/*
 * Dword add of the 32-bit value rs (plus an optional +1, used by SUBD to
 * complete the two's complement) into the pair / 32-bit register rdn.
 * Sets PSR.C from bit 32 of the widened sum and PSR.F to signed overflow.
 */
static bool gen_ADDD_rp(int rdn, TCGv_i32 rs, bool plus_one) {
    TCGv temp2_32 = tcg_temp_new_i32();
    TCGv_i64 temp1 = tcg_temp_new_i64();
    TCGv_i64 temp2 = tcg_temp_new_i64();

    TCGv_i32 rdl = r[rdn];
    TCGv_i32 rdh = r[rdn + 1];

    /* Move param registers into 64 bit temporaries */
    tcg_gen_extu_i32_i64(temp1, rs);

    if (rdn < CR16C_FIRST_32B_REG) {
        /* Compose the 16-bit pair into a 32-bit operand */
        tcg_gen_deposit_i32(temp2_32, rdl, rdh, 16, 16);
    }
    else {
        tcg_gen_mov_i32(temp2_32, rdl);
    }
    tcg_gen_extu_i32_i64(temp2, temp2_32);

    /* Prepare overflow flag: operands differing in sign cannot overflow */
    tcg_gen_xor_i32(psr_f, temp2_32, rs);

    tcg_gen_add_i64(temp1, temp1, temp2);
    if(plus_one) {
        tcg_gen_addi_i64(temp1, temp1, 1);
    }

    tcg_gen_extrl_i64_i32(rdl, temp1);
    if (rdn < CR16C_FIRST_32B_REG) {
        /* Mirror the high half into the pair's upper register */
        tcg_gen_shri_i32(rdh, rdl, 16);
    }

    /* Carry flag: bit 32 of the widened sum */
    tcg_gen_extrh_i64_i32(psr_c, temp1);

    /* Overflow flag: same-signed operands whose result sign flipped */
    tcg_gen_xor_i32(temp2_32, temp2_32, rdl);
    tcg_gen_andc_i32(psr_f, temp2_32, psr_f);
    tcg_gen_shri_i32(psr_f, psr_f, 31);

    return true;
}
|
|
|
|
/* ADDi / ADDCi imm, rd (byte/word): thin wrapper over gen_ADD_imm. */
static bool trans_ADD_imm(DisasContext *ctx, arg_ADD_imm *a) {
    gen_ADD_imm(r[a->rd], a->imm, a->add_carry, a->width);
    return true;
}
|
|
|
|
/* ADDi / ADDCi rs, rd (byte/word): thin wrapper over gen_ADD_reg. */
static bool trans_ADD_reg(DisasContext *ctx, arg_ADD_reg *a) {
    gen_ADD_reg(r[a->rd], r[a->rs], a->add_carry, a->width);
    return true;
}
|
|
|
|
/* ADDD imm, rd (dword): thin wrapper over gen_ADDD_imm. */
static bool trans_ADDD_imm(DisasContext *ctx, arg_ADDD_imm *a) {
    gen_ADDD_imm(a->rd, a->imm);
    return true;
}
|
|
|
|
/*
 * ADDD rs, rd (dword). A 16-bit source pair is first composed in place
 * into its low register (its upper 16 bits are don't-care) before being
 * handed to gen_ADDD_rp.
 */
static bool trans_ADDD_rp(DisasContext *ctx, arg_ADDD_rp *a) {
    if (a->rs < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rs], r[a->rs], r[a->rs+1], 16, 16);
    }
    gen_ADDD_rp(a->rd, r[a->rs], false);
    return true;
}
|
|
|
|
/* ADDUi imm, rd: add without updating any PSR flags; only the low
 * width*8 bits of rd receive the sum. */
static bool trans_ADDU_imm(DisasContext *ctx, arg_ADDU_imm *a) {
    TCGv sum = tcg_temp_new_i32();

    tcg_gen_addi_i32(sum, r[a->rd], a->imm);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], sum, 0, a->width * 8);
    return true;
}
|
|
|
|
/* ADDUi rs, rd: add without updating any PSR flags; only the low
 * width*8 bits of rd receive the sum. */
static bool trans_ADDU_reg(DisasContext *ctx, arg_ADDU_reg *a) {
    TCGv sum = tcg_temp_new_i32();

    tcg_gen_add_i32(sum, r[a->rd], r[a->rs]);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], sum, 0, a->width * 8);
    return true;
}
|
|
|
|
|
|
/* MULi imm, rd: multiply the zero-masked low width*8 bits of rd by the
 * immediate; the truncated product replaces the low width*8 bits of rd. */
static bool trans_MUL_imm(DisasContext *ctx, arg_MUL_imm *a) {
    TCGv_i32 prod = tcg_temp_new_i32();
    int bits = a->width * 8;

    tcg_gen_andi_i32(prod, r[a->rd], (1 << bits) - 1);
    tcg_gen_muli_i32(prod, prod, a->imm);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], prod, 0, bits);
    return true;
}
|
|
|
|
/*
 * MULB rs, rd: multiply the unsigned low bytes of rs and rd.
 * NOTE(review): only the low 8 bits of the product are written back to rd
 * — confirm against the ISA whether MULB should produce a byte or a word
 * result (MULSB below deposits 16 bits).
 */
static bool trans_MULB_reg(DisasContext *ctx, arg_MULB_reg *a) {
    TCGv_i32 temp1 = tcg_temp_new_i32();
    TCGv_i32 temp2 = tcg_temp_new_i32();

    tcg_gen_andi_i32(temp1, r[a->rd], 0xFF);
    tcg_gen_andi_i32(temp2, r[a->rs], 0xFF);
    tcg_gen_mul_i32(temp1, temp1, temp2);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], temp1, 0, 8);

    return true;
}
|
|
|
|
/* MULSB rs, rd: signed byte multiply; the 16-bit product replaces the
 * low word of rd. */
static bool trans_MULSB_reg(DisasContext *ctx, arg_MULSB_reg *a) {
    TCGv_i32 lhs = tcg_temp_new_i32();
    TCGv_i32 rhs = tcg_temp_new_i32();

    tcg_gen_ext8s_i32(lhs, r[a->rd]);
    tcg_gen_ext8s_i32(rhs, r[a->rs]);
    tcg_gen_mul_i32(lhs, lhs, rhs);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], lhs, 0, 16);
    return true;
}
|
|
|
|
/*
 * MULSW rs, rd: signed word multiply producing a dword in the pair
 * (rd+1:rd) or the 32-bit register rd.
 * NOTE(review): r[a->rs] is sign-extended in place — harmless only if rs
 * is a 16-bit register (upper bits are don't-care); confirm the encoding
 * cannot name a 32-bit source here.
 */
static bool trans_MULSW_reg(DisasContext *ctx, arg_MULSW_reg *a) {
    TCGv_i32 rdl = r[a->rd];
    TCGv_i32 rdh = r[a->rd+1];

    tcg_gen_ext16s_i32(rdl, rdl);
    tcg_gen_ext16s_i32(r[a->rs], r[a->rs]);
    tcg_gen_mul_i32(rdl, rdl, r[a->rs]);
    if (a->rd < CR16C_FIRST_32B_REG) {
        /* Mirror the product's high half into the pair's upper register */
        tcg_gen_shri_i32(rdh, rdl, 16);
    }

    return true;
}
|
|
|
|
/*
 * MULUW rs, rd: unsigned word multiply producing a dword in the pair
 * (rd+1:rd) or the 32-bit register rd.
 * NOTE(review): r[a->rs] is masked in place — harmless only if rs is a
 * 16-bit register (upper bits are don't-care); confirm the encoding
 * cannot name a 32-bit source here.
 */
static bool trans_MULUW_reg(DisasContext *ctx, arg_MULUW_reg *a) {
    TCGv_i32 rdl = r[a->rd];
    TCGv_i32 rdh = r[a->rd+1];

    tcg_gen_andi_i32(rdl, rdl, 0xFFFF);
    tcg_gen_andi_i32(r[a->rs], r[a->rs], 0xFFFF);
    tcg_gen_mul_i32(rdl, rdl, r[a->rs]);
    if (a->rd < CR16C_FIRST_32B_REG) {
        /* Mirror the product's high half into the pair's upper register */
        tcg_gen_shri_i32(rdh, rdl, 16);
    }

    return true;
}
|
|
|
|
/* MULW rs, rd: word multiply; the truncated 16-bit product replaces the
 * low word of rd. */
static bool trans_MULW_reg(DisasContext *ctx, arg_MULW_reg *a) {
    TCGv_i32 prod = tcg_temp_new_i32();

    tcg_gen_mul_i32(prod, r[a->rd], r[a->rs]);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], prod, 0, 16);
    return true;
}
|
|
|
|
/*
 * SUBi / SUBCi imm, rd: implemented as addition of the negated immediate.
 * For subtract-with-carry PSR.C holds a borrow, so it is inverted before
 * gen_ADD_imm consumes it and the immediate is reduced by one
 * (x - y - b == x + (-y - 1) + (1 - b)). The outgoing carry is inverted
 * back into a borrow.
 */
static bool trans_SUB_imm(DisasContext *ctx, arg_SUB_imm *a) {
    int32_t imm = -a->imm;
    if (a->add_carry) {
        imm--;
        tcg_gen_xori_i32(psr_c, psr_c, 1);
    }

    gen_ADD_imm(r[a->rd], imm, a->add_carry, a->width);
    tcg_gen_xori_i32(psr_c, psr_c, 1);
    return true;
}
|
|
|
|
/*
 * SUBi / SUBCi rs, rd: implemented via gen_ADD_reg. With carry, the borrow
 * in PSR.C is inverted and the one's complement of rs (within the operand
 * width) is added (x - y - b == x + ~y + (1 - b)); without carry the plain
 * two's-complement negation is added. The outgoing carry is inverted back
 * into a borrow.
 */
static bool trans_SUB_reg(DisasContext *ctx, arg_SUB_reg *a) {
    TCGv_i32 temp = tcg_temp_new_i32();
    if (a->add_carry) {
        tcg_gen_xori_i32(psr_c, psr_c, 1);
        tcg_gen_xori_i32(temp, r[a->rs], (1 << (a->width*8)) - 1);
    } else {
        tcg_gen_neg_i32(temp, r[a->rs]);
    }
    gen_ADD_reg(r[a->rd], temp, a->add_carry, a->width);
    tcg_gen_xori_i32(psr_c, psr_c, 1);
    return true;
}
|
|
|
|
/*
 * SUBD rs, rd (dword): add the one's complement of the composed 32-bit
 * source plus one (two's complement) via gen_ADDD_rp, then invert the
 * resulting carry into a borrow. A 16-bit source pair is composed in place
 * into its low register first (its upper 16 bits are don't-care).
 */
static bool trans_SUBD_rp(DisasContext *ctx, arg_SUBD_rp *a) {
    TCGv_i32 templ = tcg_temp_new_i32();
    if (a->rs < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rs], r[a->rs], r[a->rs+1], 16, 16);
    }
    tcg_gen_xori_i32(templ, r[a->rs], 0xFFFFFFFF);
    gen_ADDD_rp(a->rd, templ, true);
    tcg_gen_xori_i32(psr_c, psr_c, 1);
    return true;
}
|
|
|
|
/*
 * SUBD imm, rd (dword): add the negated immediate and invert the carry
 * into a borrow.
 * NOTE(review): -a->imm overflows if imm == INT32_MIN — confirm the
 * decoder cannot produce that value.
 */
static bool trans_SUBD_imm(DisasContext *ctx, arg_SUBD_imm *a) {
    gen_ADDD_imm(a->rd, -a->imm);
    tcg_gen_xori_i32(psr_c, psr_c, 1);
    return true;
}
|
|
|
|
|
|
/* TODO: This is quite certainly a bad implementation */
/*
 * MACQW: Q15 fractional multiply-accumulate. The two Q15 operands are
 * multiplied (magnitudes, with the product sign resolved separately), the
 * product is accumulated into the dword in rd (handled in a sign/magnitude
 * style), and on signed overflow the result saturates.
 * NOTE(review): the sign/magnitude round-trips and the saturation bounds
 * below have not been validated against the ISA definition of MACQW —
 * treat as provisional (see TODO above).
 */
static bool trans_MACQW(DisasContext *ctx, arg_MACQW *a) {
    TCGv temp_l = tcg_temp_new_i32();
    TCGv temp_h = tcg_temp_new_i32();
    TCGv temp_mul = temp_l;          // aliases temp_l
    TCGv temp_mul_sign = temp_h;     // aliases temp_h
    TCGv temp_neg = tcg_temp_new_i32();
    TCGv temp_dest = tcg_temp_new_i32();
    TCGv temp_of = temp_mul_sign;    // aliases temp_h
    TCGv temp_of2 = tcg_temp_new_i32();


    /* Multiply the 15-bit magnitudes; the extra left shift implements the
     * Q15 "<< 1" of the fractional product */
    tcg_gen_andi_i32(temp_l, r[a->rs1], 0x7FFF);
    tcg_gen_shli_i32(temp_l, temp_l, 1);
    tcg_gen_andi_i32(temp_h, r[a->rs2], 0x7FFF);

    tcg_gen_mul_i32(temp_mul, temp_l, temp_h);

    /* Replace with negative value in two's complement if result is negative
     * (operand sign bits differ) */
    tcg_gen_neg_i32(temp_neg, temp_mul);
    tcg_gen_xor_i32(temp_mul_sign, r[a->rs1], r[a->rs2]);
    tcg_gen_movcond_i32(TCG_COND_TSTNE, temp_mul, temp_mul_sign, tcg_constant_i32(0x8000), temp_neg, temp_mul);

    /* Load destination register (composing a 16-bit pair) */
    tcg_gen_mov_i32(temp_dest, r[a->rd]);
    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(temp_dest, r[a->rd], r[a->rd+1], 16, 16);
    }

    /* Convert dest to two's complement */
    tcg_gen_andi_i32(temp_neg, temp_dest, 0x7FFFFFFF);
    tcg_gen_neg_i32(temp_neg, temp_neg);
    tcg_gen_movcond_i32(TCG_COND_TSTNE, temp_dest, temp_dest, tcg_constant_i32(0x80000000), temp_neg, temp_dest);

    /* If sign change needs to be checked (addends share a sign) */
    tcg_gen_eqv_i32(temp_of, temp_dest, temp_mul);

    /* Add to the result register pair */
    tcg_gen_add_i32(temp_dest, temp_dest, temp_mul);

    /* Calculate overflow flag */
    tcg_gen_xor_i32(temp_of2, temp_dest, temp_mul);
    tcg_gen_and_i32(temp_of, temp_of, temp_of2);

    /* Convert back from two's complement */
    tcg_gen_sub_i32(temp_neg, tcg_constant_i32(0x8000), temp_dest);
    tcg_gen_movcond_i32(TCG_COND_TSTNE, temp_dest, temp_dest, tcg_constant_i32(0x80000000), temp_neg, temp_dest);

    /* Handle overflow: saturate by accumulator sign */
    tcg_gen_movcond_i32(TCG_COND_TSTNE, temp_of2, r[a->rd+1], tcg_constant_i32(0x8000), tcg_constant_i32(0xFFFFFFFF), tcg_constant_i32(0x7FFFFFFF));
    tcg_gen_movcond_i32(TCG_COND_TSTNE, temp_dest, temp_of, tcg_constant_i32(0x80000000), temp_of2, temp_dest);

    /* Write back, mirroring the high half for a 16-bit pair */
    tcg_gen_mov_i32(r[a->rd], temp_dest);
    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_shri_i32(r[a->rd+1], temp_dest, 16);
    }

    return true;
}
|
|
|
|
/*
 * MACUW rs1, rs2, rd: unsigned word multiply-accumulate into the dword in
 * rd, saturating to 0xFFFFFFFF when the accumulation carries out of 32 bits.
 */
static bool trans_MACUW(DisasContext *ctx, arg_MACUW *a) {
    TCGv rdl = r[a->rd];
    TCGv rdh = r[a->rd+1];
    TCGv temp1 = tcg_temp_new_i32();
    TCGv temp2 = tcg_temp_new_i32();

    /* Zero potential junk in higher bits */
    tcg_gen_andi_i32(temp1, r[a->rs1], 0xFFFF);
    tcg_gen_andi_i32(temp2, r[a->rs2], 0xFFFF);

    tcg_gen_mul_i32(temp1, temp1, temp2);

    /* Compose a 16-bit pair into a 32-bit accumulator */
    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(rdl, rdl, rdh, 16, 16);
    }

    /* 33-bit add: temp1 receives the carry-out */
    tcg_gen_add2_i32(rdl, temp1, rdl, tcg_constant_i32(0), temp1, tcg_constant_i32(0));

    /* Saturate on unsigned overflow */
    tcg_gen_movcond_i32(TCG_COND_NE, rdl, temp1, tcg_constant_i32(0), tcg_constant_i32(0xFFFFFFFF), rdl);
    if (a->rd < CR16C_FIRST_32B_REG) {
        /* Mirror the high half into the pair's upper register */
        tcg_gen_shri_i32(rdh, rdl, 16);
    }

    return true;
}
|
|
|
|
/*
 * MACSW rs1, rs2, rd: signed word multiply-accumulate into the dword in rd,
 * saturating to INT32_MIN/INT32_MAX on signed overflow of the accumulation.
 */
static bool trans_MACSW(DisasContext *ctx, arg_MACSW *a) {
    TCGv rdl = r[a->rd];
    TCGv rdh = r[a->rd+1];
    TCGv temp1 = tcg_temp_new_i32();
    TCGv temp2 = tcg_temp_new_i32();
    TCGv temp_of = tcg_temp_new_i32();
    TCGv temp_of2 = temp2;   // aliases temp2 (free after the multiply)

    tcg_gen_ext16s_i32(temp1, r[a->rs1]);
    tcg_gen_ext16s_i32(temp2, r[a->rs2]);

    tcg_gen_mul_i32(temp1, temp1, temp2);

    /* Compose a 16-bit pair into a 32-bit accumulator */
    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(rdl, rdl, rdh, 16, 16);
    }

    /* Overflow precondition: addends share a sign */
    tcg_gen_eqv_i32(temp_of, rdl, temp1);

    tcg_gen_add_i32(rdl, rdl, temp1);

    /* Overflow occurred if the result's sign flipped */
    tcg_gen_xor_i32(temp_of2, rdl, temp1);
    tcg_gen_and_i32(temp_of, temp_of, temp_of2);

    /* Pick the saturation value by the product's sign, apply on overflow */
    tcg_gen_movcond_i32(TCG_COND_TSTNE, temp1, temp1, tcg_constant_i32(0x8000), tcg_constant_i32(0x80000000), tcg_constant_i32(0x7FFFFFFF));
    tcg_gen_movcond_i32(TCG_COND_TSTNE, rdl, temp_of, tcg_constant_i32(0x80000000), temp1, rdl);

    if (a->rd < CR16C_FIRST_32B_REG) {
        /* Mirror the high half into the pair's upper register */
        tcg_gen_shri_i32(rdh, rdl, 16);
    }

    return true;
}
|
|
|
|
|
|
/* Integer Comparison */
|
|
|
|
/*
 * Set the PSR comparison flags: Z for equality, L for the unsigned
 * "greater" relation and N for the signed one. trans_SCOND (and the
 * branch conditions) test L for HI/LS/LO/HS and N for GT/LE/LT/GE.
 *
 * Bug fix: the signedness of the two relation flags was swapped — L was
 * computed with the signed TCG_COND_GT and N with the unsigned
 * TCG_COND_GTU, inverting the signedness of every conditional test.
 * NOTE(review): operand order (src1 vs src2) is kept exactly as before;
 * confirm it matches the ISA's src/dest comparison direction.
 */
static void gen_cmp(TCGv_i32 src1, TCGv_i32 src2) {
    tcg_gen_setcond_i32(TCG_COND_EQ, psr_z, src1, src2);
    /* L flag: unsigned relation */
    tcg_gen_setcond_i32(TCG_COND_GTU, psr_l, src1, src2);
    /* N flag: signed relation */
    tcg_gen_setcond_i32(TCG_COND_GT, psr_n, src1, src2);
}
|
|
|
|
/*
 * CMPi imm, rs: compare the immediate against rs and set PSR Z/L/N.
 * Byte/word operands are sign-extended; a dword operand is composed from
 * the 16-bit pair (or taken directly from a 32-bit register).
 */
static bool trans_CMP_imm(DisasContext *ctx, arg_CMP_imm *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    if (a->width == 1) {
        tcg_gen_ext8s_i32(temp, r[a->rs]);
    }
    else if (a->width == 2) {
        tcg_gen_ext16s_i32(temp, r[a->rs]);
    }
    else if (a->rs < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(temp, r[a->rs], r[a->rs+1], 16, 16);
    }
    else {
        tcg_gen_mov_i32(temp, r[a->rs]);
    }

    gen_cmp(tcg_constant_i32(a->imm), temp);

    return true;
}
|
|
|
|
/*
 * CMPi rs1, rs2: sign-extend both byte/word operands and set PSR Z/L/N.
 * NOTE(review): the temporaries are only initialized for width 1 and 2 —
 * if the decoder could ever produce another width here (dword compares
 * are expected to decode as CMPD), uninitialized temps would be compared.
 */
static bool trans_CMP_reg(DisasContext *ctx, arg_CMP_reg *a) {
    TCGv_i32 temp1 = tcg_temp_new_i32();
    TCGv_i32 temp2 = tcg_temp_new_i32();

    if (a->width == 1) {
        tcg_gen_ext8s_i32(temp1, r[a->rs1]);
        tcg_gen_ext8s_i32(temp2, r[a->rs2]);
    }
    else if (a->width == 2) {
        tcg_gen_ext16s_i32(temp1, r[a->rs1]);
        tcg_gen_ext16s_i32(temp2, r[a->rs2]);
    }

    gen_cmp(temp2, temp1);

    return true;
}
|
|
|
|
/*
 * CMPD rs1, rs2: dword compare. 16-bit pair operands are first composed
 * in place into their low registers (their upper 16 bits are don't-care),
 * then compared as full 32-bit values.
 */
static bool trans_CMPD_reg(DisasContext *ctx, arg_CMPD_reg *a) {
    if (a->rs1 < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rs1], r[a->rs1], r[a->rs1+1], 16, 16);
    }
    if (a->rs2 < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rs2], r[a->rs2], r[a->rs2+1], 16, 16);
    }

    gen_cmp(r[a->rs2], r[a->rs1]);

    return true;
}
|
|
|
|
|
|
/* Logical and Boolean */
|
|
|
|
/*
 * ANDi imm, rd: AND the low width*8 bits of rd with the immediate while
 * leaving the bits above the operand width untouched (their mask bits
 * are forced to 1).
 *
 * Fix: the mask is now built with a 64-bit shift so that width == 4 does
 * not shift a 32-bit int by 32 (undefined behavior in C); for width 4 the
 * high-ones mask correctly degenerates to 0.
 */
static bool trans_AND_imm(DisasContext *ctx, arg_AND_imm *a) {
    int32_t high_ones = (int32_t)-((int64_t)1 << (a->width * 8));

    tcg_gen_andi_i32(r[a->rd], r[a->rd], high_ones | a->imm);
    return true;
}
|
|
|
|
/*
 * ANDi rs, rd: AND the low width*8 bits of rs into rd; rd's bits above
 * the operand width are preserved by forcing the source's upper bits to 1.
 *
 * Fix: the high-ones constant is now built with a 64-bit shift so that
 * width == 4 does not shift a 32-bit int by 32 (undefined behavior in C).
 */
static bool trans_AND_reg(DisasContext *ctx, arg_AND_reg *a) {
    TCGv_i32 masked = tcg_temp_new_i32();
    int32_t high_ones = (int32_t)-((int64_t)1 << (a->width * 8));

    tcg_gen_ori_i32(masked, r[a->rs], high_ones);
    tcg_gen_and_i32(r[a->rd], masked, r[a->rd]);
    return true;
}
|
|
|
|
/* ANDD imm, rd: dword AND; for a 16-bit pair the high word of the
 * immediate is applied to the pair's upper register. */
static bool trans_ANDD_imm(DisasContext *ctx, arg_ANDD_imm *a) {
    tcg_gen_andi_i32(r[a->rd], r[a->rd], a->imm);
    if (a->rd >= CR16C_FIRST_32B_REG) {
        return true;
    }
    tcg_gen_andi_i32(r[a->rd + 1], r[a->rd + 1], a->imm >> 16);
    return true;
}
|
|
|
|
/*
 * ANDD rs, rd on register pairs / 32-bit registers. The pure pair-pair
 * case is done half-by-half; mixed cases first compose 32-bit values in
 * the low registers (their upper 16 bits are don't-care), AND them, and
 * split the result back into the destination pair.
 */
static bool trans_ANDD_rp(DisasContext *ctx, arg_ANDD_rp *a) {
    if (a->rd < CR16C_FIRST_32B_REG && a->rs < CR16C_FIRST_32B_REG) {
        tcg_gen_and_i32(r[a->rd], r[a->rd], r[a->rs]);
        tcg_gen_and_i32(r[a->rd+1], r[a->rd+1], r[a->rs+1]);
    }
    else {
        if (a->rs < CR16C_FIRST_32B_REG) {
            tcg_gen_deposit_i32(r[a->rs], r[a->rs], r[a->rs+1], 16, 16);
        }
        if (a->rd < CR16C_FIRST_32B_REG) {
            tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
        }
        tcg_gen_and_i32(r[a->rd], r[a->rd], r[a->rs]);
        if (a->rd < CR16C_FIRST_32B_REG) {
            tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
        }
    }
    return true;
}
|
|
|
|
/*
 * ORi imm, rd: OR the immediate (restricted to the operand width) into rd.
 *
 * Fix: the width mask is now built with a 64-bit shift so that width == 4
 * does not shift a 32-bit int by 32 (undefined behavior in C).
 */
static bool trans_OR_imm(DisasContext *ctx, arg_OR_imm *a) {
    int32_t mask = (int32_t)(((int64_t)1 << (a->width * 8)) - 1);

    tcg_gen_ori_i32(r[a->rd], r[a->rd], a->imm & mask);
    return true;
}
|
|
|
|
/*
 * ORi rs, rd: OR the low width*8 bits of rs into rd; the source's bits
 * above the operand width are masked off so rd's upper bits are preserved.
 *
 * Fix: the width mask is now built with a 64-bit shift so that width == 4
 * does not shift a 32-bit int by 32 (undefined behavior in C).
 */
static bool trans_OR_reg(DisasContext *ctx, arg_OR_reg *a) {
    TCGv_i32 masked = tcg_temp_new_i32();
    int32_t mask = (int32_t)(((int64_t)1 << (a->width * 8)) - 1);

    tcg_gen_andi_i32(masked, r[a->rs], mask);
    tcg_gen_or_i32(r[a->rd], masked, r[a->rd]);
    return true;
}
|
|
|
|
/* ORD imm, rd: dword OR; for a 16-bit pair the high word of the
 * immediate is applied to the pair's upper register. */
static bool trans_ORD_imm(DisasContext *ctx, arg_ORD_imm *a) {
    tcg_gen_ori_i32(r[a->rd], r[a->rd], a->imm);
    if (a->rd >= CR16C_FIRST_32B_REG) {
        return true;
    }
    tcg_gen_ori_i32(r[a->rd + 1], r[a->rd + 1], a->imm >> 16);
    return true;
}
|
|
|
|
/*
 * ORD rs, rd on register pairs / 32-bit registers. The pure pair-pair
 * case is done half-by-half; mixed cases first compose 32-bit values in
 * the low registers (their upper 16 bits are don't-care), OR them, and
 * split the result back into the destination pair.
 */
static bool trans_ORD_rp(DisasContext *ctx, arg_ORD_rp *a) {
    if (a->rd < CR16C_FIRST_32B_REG && a->rs < CR16C_FIRST_32B_REG) {
        tcg_gen_or_i32(r[a->rd], r[a->rd], r[a->rs]);
        tcg_gen_or_i32(r[a->rd+1], r[a->rd+1], r[a->rs+1]);
    }
    else {
        if (a->rs < CR16C_FIRST_32B_REG) {
            tcg_gen_deposit_i32(r[a->rs], r[a->rs], r[a->rs+1], 16, 16);
        }
        if (a->rd < CR16C_FIRST_32B_REG) {
            tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
        }
        tcg_gen_or_i32(r[a->rd], r[a->rd], r[a->rs]);
        if (a->rd < CR16C_FIRST_32B_REG) {
            tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
        }
    }
    return true;
}
|
|
|
|
/*
 * XORi imm, rd: XOR the immediate (restricted to the operand width) into rd.
 *
 * Fix: the width mask is now built with a 64-bit shift so that width == 4
 * does not shift a 32-bit int by 32 (undefined behavior in C).
 */
static bool trans_XOR_imm(DisasContext *ctx, arg_XOR_imm *a) {
    int32_t mask = (int32_t)(((int64_t)1 << (a->width * 8)) - 1);

    tcg_gen_xori_i32(r[a->rd], r[a->rd], a->imm & mask);
    return true;
}
|
|
|
|
/*
 * XORi rs, rd: XOR the low width*8 bits of rs into rd; the source's bits
 * above the operand width are masked off so rd's upper bits are preserved.
 *
 * Fix: the width mask is now built with a 64-bit shift so that width == 4
 * does not shift a 32-bit int by 32 (undefined behavior in C).
 */
static bool trans_XOR_reg(DisasContext *ctx, arg_XOR_reg *a) {
    TCGv_i32 masked = tcg_temp_new_i32();
    int32_t mask = (int32_t)(((int64_t)1 << (a->width * 8)) - 1);

    tcg_gen_andi_i32(masked, r[a->rs], mask);
    tcg_gen_xor_i32(r[a->rd], masked, r[a->rd]);
    return true;
}
|
|
|
|
/* XORD imm, rd: dword XOR; for a 16-bit pair the high word of the
 * immediate is applied to the pair's upper register. */
static bool trans_XORD_imm(DisasContext *ctx, arg_XORD_imm *a) {
    tcg_gen_xori_i32(r[a->rd], r[a->rd], a->imm);
    if (a->rd >= CR16C_FIRST_32B_REG) {
        return true;
    }
    tcg_gen_xori_i32(r[a->rd + 1], r[a->rd + 1], a->imm >> 16);
    return true;
}
|
|
|
|
/*
 * XORD rs, rd on register pairs / 32-bit registers. The pure pair-pair
 * case is done half-by-half; mixed cases first compose 32-bit values in
 * the low registers (their upper 16 bits are don't-care), XOR them, and
 * split the result back into the destination pair.
 */
static bool trans_XORD_rp(DisasContext *ctx, arg_XORD_rp *a) {
    if (a->rd < CR16C_FIRST_32B_REG && a->rs < CR16C_FIRST_32B_REG) {
        tcg_gen_xor_i32(r[a->rd], r[a->rd], r[a->rs]);
        tcg_gen_xor_i32(r[a->rd+1], r[a->rd+1], r[a->rs+1]);
    }
    else {
        if (a->rs < CR16C_FIRST_32B_REG) {
            tcg_gen_deposit_i32(r[a->rs], r[a->rs], r[a->rs+1], 16, 16);
        }
        if (a->rd < CR16C_FIRST_32B_REG) {
            tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
        }
        tcg_gen_xor_i32(r[a->rd], r[a->rd], r[a->rs]);
        if (a->rd < CR16C_FIRST_32B_REG) {
            tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
        }
    }
    return true;
}
|
|
|
|
/*
 * SCOND cond, rd: set rd to 1 if the condition holds, else 0.
 * Conditions map onto the PSR flag variables written by gen_cmp and the
 * arithmetic helpers: Z (equal), L (unsigned relation), N (signed
 * relation), C (carry) and F (overflow).
 * NOTE(review): there is no default case — cond values 14/15 (ALWAYS/UC)
 * silently leave rd unchanged; confirm the encoding cannot produce them
 * for SCOND.
 * NOTE(review): the HI/GT/HS/GE cases rely on psr_l/psr_n/psr_z being
 * canonical 0/1 values (true for setcond results), while C/F-based cases
 * mask with "& 1" because those flags can hold shifted multi-bit values.
 */
static bool trans_SCOND(DisasContext *ctx, arg_SCOND *a) {
    switch (a->imm) {
    case CR16C_COND_EQ:
        tcg_gen_andi_i32(r[a->rd], psr_z, 1);
        break;
    case CR16C_COND_NE:
        tcg_gen_andc_i32(r[a->rd], tcg_constant_i32(1), psr_z);
        break;
    case CR16C_COND_CS:
        tcg_gen_andi_i32(r[a->rd], psr_c, 1);
        break;
    case CR16C_COND_CC:
        tcg_gen_andc_i32(r[a->rd], tcg_constant_i32(1), psr_c);
        break;
    case CR16C_COND_HI:
        tcg_gen_mov_i32(r[a->rd], psr_l);
        break;
    case CR16C_COND_LS:
        tcg_gen_andc_i32(r[a->rd], tcg_constant_i32(1), psr_l);
        break;
    case CR16C_COND_GT:
        tcg_gen_mov_i32(r[a->rd], psr_n);
        break;
    case CR16C_COND_LE:
        tcg_gen_andc_i32(r[a->rd], tcg_constant_i32(1), psr_n);
        break;
    case CR16C_COND_FS:
        tcg_gen_andi_i32(r[a->rd], psr_f, 1);
        break;
    case CR16C_COND_FC:
        tcg_gen_andc_i32(r[a->rd], tcg_constant_i32(1), psr_f);
        break;
    case CR16C_COND_LO:
        /* lower: neither equal nor unsigned-greater */
        tcg_gen_nor_i32(r[a->rd], psr_z, psr_l);
        tcg_gen_andi_i32(r[a->rd], r[a->rd], 1);
        break;
    case CR16C_COND_HS:
        /* higher or same: equal or unsigned-greater */
        tcg_gen_or_i32(r[a->rd], psr_z, psr_l);
        break;
    case CR16C_COND_LT:
        /* less: neither equal nor signed-greater */
        tcg_gen_nor_i32(r[a->rd], psr_z, psr_n);
        tcg_gen_andi_i32(r[a->rd], r[a->rd], 1);
        break;
    case CR16C_COND_GE:
        /* greater or equal: equal or signed-greater */
        tcg_gen_or_i32(r[a->rd], psr_z, psr_n);
        break;
    }

    return true;
}
|
|
|
|
|
|
/* Shifts */
|
|
|
|
/* ASHU with a positive (left) immediate shift count: shift rd left and
 * deposit the result into the low width*8 bits. */
static bool trans_ASHU_imm_l(DisasContext *ctx, arg_ASHU_imm_l *a) {
    TCGv_i32 shifted = tcg_temp_new_i32();

    tcg_gen_shli_i32(shifted, r[a->rd], a->imm);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], shifted, 0, a->width * 8);
    return true;
}
|
|
|
|
/*
 * ASHU with a negative immediate shift count (right shift): the count is
 * the two's complement of the field, masked to the operand width's valid
 * range. The operand is sign-extended, arithmetically shifted, and the
 * result deposited into the low width*8 bits of rd.
 */
static bool trans_ASHU_imm_r(DisasContext *ctx, arg_ASHU_imm_r *a) {
    int32_t imm = (-a->imm) & (a->width == 1 ? 0x7 : 0xF);
    TCGv_i32 temp = tcg_temp_new_i32();

    if (a->width == 1) {
        tcg_gen_ext8s_i32(temp, r[a->rd]);
    }
    else { /* a->width == 2 */
        tcg_gen_ext16s_i32(temp, r[a->rd]);
    }

    tcg_gen_sari_i32(temp, temp, imm);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], temp, 0, a->width*8);

    return true;
}
|
|
|
|
/*
 * ASHU rd by the signed count in rc: a positive count shifts left, a
 * negative count shifts right arithmetically. Both candidate results are
 * computed and one is selected with movcond: after negating the count,
 * a value >= 0x80 unsigned means the original count was positive (left
 * shift); otherwise the right-shifted result is used.
 * NOTE(review): for out-of-range counts the unselected shift's TCG result
 * is unspecified but discarded; confirm counts are constrained by the ISA.
 */
static bool trans_ASHU_reg(DisasContext *ctx, arg_ASHU_reg *a) {
    TCGv_i32 temp_dest = tcg_temp_new_i32();
    TCGv_i32 temp_count = tcg_temp_new_i32();
    TCGv_i32 temp_resl = tcg_temp_new_i32();
    TCGv_i32 temp_resr = temp_dest;   // aliases temp_dest

    if (a->width == 1) {
        tcg_gen_ext8s_i32(temp_dest, r[a->rd]);
    }
    else { /* a->width == 2 */
        tcg_gen_ext16s_i32(temp_dest, r[a->rd]);
    }
    tcg_gen_ext8s_i32(temp_count, r[a->rc]);

    /* Left-shift candidate (count taken as positive) */
    tcg_gen_shl_i32(temp_resl, r[a->rd], temp_count);

    /* Right-shift candidate (negated count taken as positive) */
    tcg_gen_neg_i32(temp_count, temp_count);
    tcg_gen_sar_i32(temp_resr, temp_dest, temp_count);

    /* Select by the sign of the original count */
    tcg_gen_movcond_i32(TCG_COND_GEU, temp_dest, temp_count, tcg_constant_i32(0x80), temp_resl, temp_resr);

    tcg_gen_deposit_i32(r[a->rd], r[a->rd], temp_dest, 0, a->width*8);

    return true;
}
|
|
|
|
/*
 * ASHUD with a positive (left) immediate count: the 16-bit pair is
 * composed in place into its low register (upper 16 bits are don't-care),
 * shifted, and the high half mirrored back into the pair's upper register.
 */
static bool trans_ASHUD_imm_l(DisasContext *ctx, arg_ASHUD_imm_l *a) {
    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
    }

    tcg_gen_shli_i32(r[a->rd], r[a->rd], a->imm);

    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
    }

    return true;
}
|
|
|
|
/*
 * ASHUD with a negative immediate count (arithmetic right shift on the
 * dword): the count is the two's complement of the field masked to 0..31.
 * A 16-bit pair is composed in place before the shift and split back after.
 */
static bool trans_ASHUD_imm_r(DisasContext *ctx, arg_ASHUD_imm_r *a) {
    int32_t imm = (~a->imm+1) & 0x1F;

    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
    }

    tcg_gen_sari_i32(r[a->rd], r[a->rd], imm);

    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
    }

    return true;
}
|
|
|
|
/*
 * ASHUD rd by the signed count in rc: a positive count shifts the dword
 * left, a negative one shifts it right arithmetically. The 16-bit pair is
 * composed in place first; both candidates are computed and one selected
 * by the sign of the original count (same trick as trans_ASHU_reg).
 */
static bool trans_ASHUD_rp(DisasContext *ctx, arg_ASHUD_rp *a) {
    TCGv_i32 temp_resl = tcg_temp_new_i32();
    TCGv_i32 temp_resr = r[a->rd];   // right-shift result lands in rd directly
    TCGv_i32 temp_count = tcg_temp_new_i32();

    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
    }
    tcg_gen_ext8s_i32(temp_count, r[a->rc]);

    /* Left-shift candidate */
    tcg_gen_shl_i32(temp_resl, r[a->rd], temp_count);

    /* Right-shift candidate with the negated count */
    tcg_gen_neg_i32(temp_count, temp_count);
    tcg_gen_sar_i32(temp_resr, r[a->rd], temp_count);

    /* Select by the sign of the original count */
    tcg_gen_movcond_i32(TCG_COND_GEU, r[a->rd], temp_count, tcg_constant_i32(0x80), temp_resl, temp_resr);

    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
    }

    return true;
}
|
|
|
|
/*
 * LSH with a negative immediate count (logical right shift): the count is
 * the two's complement of the field masked to the operand width's range.
 * The operand is zero-extended, shifted, and deposited back into the low
 * width*8 bits of rd.
 */
static bool trans_LSH_imm_r(DisasContext *ctx, arg_LSH_imm_r *a) {
    int32_t imm = (-a->imm) & (a->width == 1 ? 0x7 : 0xF);
    TCGv_i32 temp = tcg_temp_new_i32();

    if (a->width == 1) {
        tcg_gen_ext8u_i32(temp, r[a->rd]);
    }
    else { /* a->width == 2 */
        tcg_gen_ext16u_i32(temp, r[a->rd]);
    }

    tcg_gen_shri_i32(temp, temp, imm);
    tcg_gen_deposit_i32(r[a->rd], r[a->rd], temp, 0, a->width*8);
    return true;
}
|
|
|
|
/*
 * LSH rd by the signed count in rc: a positive count shifts left, a
 * negative count shifts right logically (operand zero-extended). Both
 * candidates are computed and one selected by the sign of the original
 * count (same trick as trans_ASHU_reg).
 */
static bool trans_LSH_reg(DisasContext *ctx, arg_LSH_reg *a) {
    TCGv_i32 temp_dest = tcg_temp_new_i32();
    TCGv_i32 temp_count = tcg_temp_new_i32();
    TCGv_i32 temp_resl = tcg_temp_new_i32();
    TCGv_i32 temp_resr = temp_dest;   // aliases temp_dest

    if (a->width == 1) {
        tcg_gen_ext8u_i32(temp_dest, r[a->rd]);
    }
    else { /* a->width == 2 */
        tcg_gen_ext16u_i32(temp_dest, r[a->rd]);
    }
    tcg_gen_ext8s_i32(temp_count, r[a->rc]);

    /* Left-shift candidate */
    tcg_gen_shl_i32(temp_resl, r[a->rd], temp_count);

    /* Right-shift candidate with the negated count */
    tcg_gen_neg_i32(temp_count, temp_count);
    tcg_gen_shr_i32(temp_resr, temp_dest, temp_count);

    /* Select by the sign of the original count */
    tcg_gen_movcond_i32(TCG_COND_GEU, temp_dest, temp_count, tcg_constant_i32(0x80), temp_resl, temp_resr);

    tcg_gen_deposit_i32(r[a->rd], r[a->rd], temp_dest, 0, a->width*8);

    return true;
}
|
|
|
|
/*
 * LSHD with a negative immediate count (logical right shift on the dword):
 * the count is the two's complement of the field masked to 0..31. A 16-bit
 * pair is composed in place before the shift and split back after.
 */
static bool trans_LSHD_imm_r(DisasContext *ctx, arg_LSHD_imm_r *a) {
    int16_t imm = (~a->imm+1) & 0x1F;

    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
    }

    tcg_gen_shri_i32(r[a->rd], r[a->rd], imm);

    if (a->rd < CR16C_FIRST_32B_REG) {
        tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
    }

    return true;
}
|
|
|
|
/*
 * LSHD reg,rp: 32-bit logical shift of a register pair by a signed count
 * from rc (positive = left, negative = right).
 * Uses the same both-results-plus-movcond scheme as trans_LSH_reg: after
 * negation a positive original count appears as unsigned >= 0x80 and
 * selects the left-shift result. temp_resr deliberately aliases r[rd].
 * NOTE(review): the non-selected shift may use an out-of-range count,
 * whose (undefined) result is discarded by the movcond.
 */
static bool trans_LSHD_rp(DisasContext *ctx, arg_LSHD_rp *a) {
    TCGv_i32 temp_resl = tcg_temp_new_i32();
    TCGv_i32 temp_resr = r[a->rd];
    TCGv_i32 temp_count = tcg_temp_new_i32();

    if (a->rd < CR16C_FIRST_32B_REG) {
        /* Combine (rd+1:rd) into one 32-bit value in rd. */
        tcg_gen_deposit_i32(r[a->rd], r[a->rd], r[a->rd+1], 16, 16);
    }
    /* Shift count is a signed byte in rc. */
    tcg_gen_ext8s_i32(temp_count, r[a->rc]);

    tcg_gen_shl_i32(temp_resl, r[a->rd], temp_count);

    tcg_gen_neg_i32(temp_count, temp_count);
    tcg_gen_shr_i32(temp_resr, r[a->rd], temp_count);

    tcg_gen_movcond_i32(TCG_COND_GEU, r[a->rd], temp_count, tcg_constant_i32(0x80), temp_resl, temp_resr);

    if (a->rd < CR16C_FIRST_32B_REG) {
        /* Split the high half back into rd+1. */
        tcg_gen_shri_i32(r[a->rd+1], r[a->rd], 16);
    }

    return true;
}
|
|
|
|
|
|
/* Bit Operation */
|
|
|
|
/*
 * Atomically clear bit @pos of the 1/2-byte memory operand at @addr.
 * The pre-update value lands in psr_f shifted down by @pos, so bit 0 of
 * psr_f holds the old bit value. Higher bits of psr_f may remain set;
 * flag readers mask with &1 (this file's convention).
 */
static void gen_CBIT(TCGv_i32 addr, uint8_t pos, uint8_t width) {
    uint32_t mask = ~(1 << pos);
    MemOp memop = width == 2 ? MO_UW : MO_UB;
    tcg_gen_atomic_fetch_and_i32(psr_f, addr, tcg_constant_i32(mask), 0, memop);
    tcg_gen_shri_i32(psr_f, psr_f, pos);
}
|
|
|
|
/*
 * Atomically set bit @pos of the 1/2-byte memory operand at @addr.
 * As with gen_CBIT, psr_f receives the pre-update value shifted so its
 * bit 0 is the old bit; higher bits may remain set.
 */
static void gen_SBIT(TCGv_i32 addr, uint8_t pos, uint8_t width) {
    uint32_t mask = 1 << pos;
    MemOp memop = width == 2 ? MO_UW : MO_UB;
    tcg_gen_atomic_fetch_or_i32(psr_f, addr, tcg_constant_i32(mask), 0, memop);
    tcg_gen_shri_i32(psr_f, psr_f, pos);
}
|
|
|
|
/*
 * Test bit @pos of the memory operand at @addr: load it and shift so
 * bit 0 of psr_f is the tested bit (higher bits may remain set; flag
 * readers mask with &1).
 */
static bool gen_TBIT_mem(TCGv_i32 addr, uint8_t pos, uint8_t width) {
    tcg_gen_qemu_ld_i32(psr_f, addr, 0, unsigned_op_by_width[width]);
    tcg_gen_shri_i32(psr_f, psr_f, pos);

    return true;
}
|
|
|
|
|
|
/* CBIT with register-relative-pair addressing: clear a bit in memory. */
static bool trans_CBIT_rrp(DisasContext *ctx, arg_CBIT_rrp *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_rrp_addr(ea, a->rrp, a->disp);
    gen_CBIT(ea, a->pos, a->width);
    return true;
}
|
|
|
|
/* CBIT with register + displacement addressing: clear a bit in memory. */
static bool trans_CBIT_reg(DisasContext *ctx, arg_CBIT_reg *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_addr_disp(ea, a->ra, a->disp, a->dbase);
    gen_CBIT(ea, a->pos, a->width);
    return true;
}
|
|
|
|
/* CBIT with absolute addressing: clear a bit in memory. */
static bool trans_CBIT_abs(DisasContext *ctx, arg_CBIT_abs *a) {
    gen_CBIT(tcg_constant_i32(a->addr), a->pos, a->width);
    return true;
}
|
|
|
|
/* SBIT with register-relative-pair addressing: set a bit in memory. */
static bool trans_SBIT_rrp(DisasContext *ctx, arg_SBIT_rrp *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_rrp_addr(ea, a->rrp, a->disp);
    gen_SBIT(ea, a->pos, a->width);
    return true;
}
|
|
|
|
/* SBIT with register + displacement addressing: set a bit in memory. */
static bool trans_SBIT_reg(DisasContext *ctx, arg_SBIT_reg *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_addr_disp(ea, a->ra, a->disp, a->dbase);
    gen_SBIT(ea, a->pos, a->width);
    return true;
}
|
|
|
|
/* SBIT with absolute addressing: set a bit in memory. */
static bool trans_SBIT_abs(DisasContext *ctx, arg_SBIT_abs *a) {
    gen_SBIT(tcg_constant_i32(a->addr), a->pos, a->width);
    return true;
}
|
|
|
|
/* TBIT with register-relative-pair addressing: test a memory bit into F. */
static bool trans_TBIT_mem_rrp(DisasContext *ctx, arg_TBIT_mem_rrp *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_rrp_addr(ea, a->rrp, a->disp);
    gen_TBIT_mem(ea, a->pos, a->width);
    return true;
}
|
|
|
|
/* TBIT with register + displacement addressing: test a memory bit into F. */
static bool trans_TBIT_mem_reg(DisasContext *ctx, arg_TBIT_mem_reg *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_addr_disp(ea, a->ra, a->disp, a->dbase);
    gen_TBIT_mem(ea, a->pos, a->width);
    return true;
}
|
|
|
|
/* TBIT with absolute addressing: test a memory bit into F. */
static bool trans_TBIT_mem_abs(DisasContext *ctx, arg_TBIT_mem_abs *a) {
    gen_TBIT_mem(tcg_constant_i32(a->addr), a->pos, a->width);
    return true;
}
|
|
|
|
|
|
/*
 * TBIT imm,reg: test a register bit. psr_f receives rs shifted so its
 * bit 0 is the tested bit (higher bits may be set; readers mask with &1).
 */
static bool trans_TBIT_reg_imm(DisasContext *ctx, arg_TBIT_reg_imm *a) {
    tcg_gen_shri_i32(psr_f, r[a->rs], a->pos);

    return true;
}
|
|
|
|
/*
 * TBIT reg,reg: test the register bit whose position is held in rp.
 * NOTE(review): the shift count is used unmasked; if rp can hold values
 * >= 32 (or with high garbage bits) the TCG shift result is undefined —
 * confirm whether the decoder/architecture limits the position.
 */
static bool trans_TBIT_reg_reg(DisasContext *ctx, arg_TBIT_reg_reg *a) {
    tcg_gen_shr_i32(psr_f, r[a->rs], r[a->rp]);

    return true;
}
|
|
|
|
|
|
/* Jumps and Linkage */
|
|
|
|
/* For now this instruction is abused as semihosting instruction for tests */
|
|
/*
 * EXCP: currently abused as a semihosting hook for tests (see comment
 * above). id 14 dumps registers and continues; any other id exits the
 * emulated program and terminates the TB.
 */
static bool trans_EXCP(DisasContext *ctx, arg_EXCP *a) {
    if (a->id == 14) {// DBG
        gen_helper_dump_regs(tcg_env);
        return true;
    }
    gen_helper_exit(tcg_env);

    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
|
|
|
|
/*
 * Emit a branch to @l taken when CR16C condition @cond holds.
 *
 * Flag globals may carry stale high bits from earlier flag computations,
 * so each one is normalized to 0/1 first.
 *
 * Condition semantics (CR16C architecture condition codes):
 *   EQ/NE: Z set/clear    CS/CC: C set/clear    HI/LS: L set/clear
 *   GT/LE: N set/clear    FS/FC: F set/clear
 *   LO: Z clear AND L clear      HS: Z set OR L set
 *   LT: Z clear AND N clear      GE: Z set OR N set
 *
 * BUGFIX: HS and GE previously tested and(flagA, flagB) != 1, which takes
 * the branch whenever the two flags are not BOTH set — e.g. GE was taken
 * for N=0,Z=0, which is the LT case. The OR-based tests below are the
 * correct complements of LO and LT.
 */
static void gen_br_cond(DisasContext* ctx, int cond, TCGLabel* l) {
    TCGv temp;

    /* Flags may have higher bits set */
    tcg_gen_andi_i32(psr_n, psr_n, 1);
    tcg_gen_andi_i32(psr_z, psr_z, 1);
    tcg_gen_andi_i32(psr_f, psr_f, 1);
    tcg_gen_andi_i32(psr_l, psr_l, 1);
    tcg_gen_andi_i32(psr_c, psr_c, 1);

    switch (cond) {
    case CR16C_COND_EQ:
        tcg_gen_brcondi_i32(TCG_COND_EQ, psr_z, 1, l);
        break;
    case CR16C_COND_NE:
        tcg_gen_brcondi_i32(TCG_COND_NE, psr_z, 1, l);
        break;
    case CR16C_COND_CS:
        tcg_gen_brcondi_i32(TCG_COND_EQ, psr_c, 1, l);
        break;
    case CR16C_COND_CC:
        tcg_gen_brcondi_i32(TCG_COND_NE, psr_c, 1, l);
        break;
    case CR16C_COND_HI:
        tcg_gen_brcondi_i32(TCG_COND_EQ, psr_l, 1, l);
        break;
    case CR16C_COND_LS:
        tcg_gen_brcondi_i32(TCG_COND_NE, psr_l, 1, l);
        break;
    case CR16C_COND_GT:
        tcg_gen_brcondi_i32(TCG_COND_EQ, psr_n, 1, l);
        break;
    case CR16C_COND_LE:
        tcg_gen_brcondi_i32(TCG_COND_NE, psr_n, 1, l);
        break;
    case CR16C_COND_FS:
        tcg_gen_brcondi_i32(TCG_COND_EQ, psr_f, 1, l);
        break;
    case CR16C_COND_FC:
        tcg_gen_brcondi_i32(TCG_COND_NE, psr_f, 1, l);
        break;
    case CR16C_COND_LO:
        /* Lower: Z clear AND L clear. */
        temp = tcg_temp_new_i32();
        tcg_gen_or_i32(temp, psr_z, psr_l);
        tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l);
        break;
    case CR16C_COND_HS:
        /* Higher or same: Z set OR L set (complement of LO). */
        temp = tcg_temp_new_i32();
        tcg_gen_or_i32(temp, psr_z, psr_l);
        tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l);
        break;
    case CR16C_COND_LT:
        /* Less than: Z clear AND N clear. */
        temp = tcg_temp_new_i32();
        tcg_gen_or_i32(temp, psr_z, psr_n);
        tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l);
        break;
    case CR16C_COND_GE:
        /* Greater or equal: Z set OR N set (complement of LT). */
        temp = tcg_temp_new_i32();
        tcg_gen_or_i32(temp, psr_z, psr_n);
        tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l);
        break;
    case CR16C_COND_ALWAYS:
        tcg_gen_br(l);
        break;
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
/*
 * Bcond disp: conditional PC-relative branch. Emits a two-exit TB:
 * fall through to the next insn when the condition fails, otherwise
 * jump to the branch target.
 */
static bool trans_BRCOND(DisasContext* ctx, arg_BRCOND *a) {
    TCGLabel* l = gen_new_label();
    gen_br_cond(ctx, a->cond, l);

    /* Condition false: continue with the next instruction. */
    gen_goto(&ctx->base, ctx->base.pc_next, 0);

    gen_set_label(l);

    /* Condition true: branch to the decoded destination. */
    gen_goto(&ctx->base, a->dest, 1);

    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
|
|
|
|
/*
 * BEQ0/BNE0 (compare-to-zero-and-branch): tests the low 8/16 bits of src
 * against zero and branches by a small positive displacement.
 * The encoded dest is scaled: disp = (dest + 1) * 2, relative to this
 * instruction's own address (pc_next - 2, instruction is 2 bytes).
 */
static bool trans_BR0(DisasContext* ctx, arg_BR0* a) {
    int disp5 = (a->dest + 1) * 2; // disp*2+

    TCGv tmp = tcg_temp_new_i32();
    if (a->word == 1) {
        tcg_gen_ext16u_i32(tmp, r[a->src]);
    } else {
        tcg_gen_ext8u_i32(tmp, r[a->src]);
    }

    TCGLabel* l = gen_new_label();
    /* a->ne selects branch-if-nonzero vs branch-if-zero. */
    tcg_gen_brcondi_i32(a->ne == 1 ? TCG_COND_NE : TCG_COND_EQ, tmp, 0, l);

    /* Not taken: fall through to the next instruction. */
    gen_goto(&ctx->base, ctx->base.pc_next, 0);

    gen_set_label(l);

    /* Taken: displacement is relative to this instruction's address. */
    vaddr pc_this = ctx->base.pc_next - 2;
    gen_goto(&ctx->base, pc_this + disp5, 1);

    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
|
|
|
|
/*
 * Jcond ra: conditional indirect jump through a register. The target is
 * only known at run time, so both exits use lookup_and_goto_ptr rather
 * than direct TB chaining.
 */
static bool trans_JCOND(DisasContext* ctx, arg_JCOND *a) {
    TCGLabel* l = gen_new_label();

    gen_br_cond(ctx, a->cond, l);

    /* Condition false: continue at the next instruction. */
    tcg_gen_movi_i32(pc, ctx->base.pc_next);
    tcg_gen_lookup_and_goto_ptr();

    gen_set_label(l);

    /* Condition true: jump to the address held in ra. */
    tcg_gen_mov_i32(pc, r[a->ra]);
    tcg_gen_lookup_and_goto_ptr();

    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
|
|
|
|
|
|
/* Load and Store */
|
|
|
|
|
|
/*
 * Write a loaded value @rs into destination register @rd_id honoring
 * @width (in bytes). For 1/2-byte widths (or true 32-bit registers) the
 * value is deposited into the low bits, preserving the rest; for 4-byte
 * widths on 16-bit register pairs the high part is split into rd+1.
 */
static void gen_move_dest(TCGv_i32 rs, int rd_id, int width) {
    if (width <= 2 || rd_id >= CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[rd_id], r[rd_id], rs, 0, width * 8);
    } else {
        tcg_gen_mov_i32(r[rd_id], rs);
        tcg_gen_extract_i32(r[rd_id+1], rs, 16, (width-2)*8);
    }
}
|
|
|
|
|
|
/*
 * LOAD with register + displacement addressing. The temp is reused for
 * both the effective address and the loaded value.
 */
static bool trans_LOAD(DisasContext *ctx, arg_LOAD *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    gen_compute_addr_disp(temp, a->ra, a->disp, a->dbase);
    tcg_gen_qemu_ld_i32(temp, temp, 0, unsigned_op_by_width[a->width]);
    gen_move_dest(temp, a->rd, a->width);

    return true;
}
|
|
|
|
/* LOAD with absolute addressing. */
static bool trans_LOAD_abs(DisasContext *ctx, arg_LOAD_abs *a) {
    TCGv_i32 val = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(val, tcg_constant_i32(a->addr), 0, unsigned_op_by_width[a->width]);
    gen_move_dest(val, a->rd, a->width);
    return true;
}
|
|
|
|
/* LOAD with register-relative-pair addressing. */
static bool trans_LOAD_rrp(DisasContext *ctx, arg_LOAD_rrp *a) {
    TCGv_i32 val = tcg_temp_new_i32();
    gen_compute_rrp_addr(val, a->rrp, a->disp);
    tcg_gen_qemu_ld_i32(val, val, 0, unsigned_op_by_width[a->width]);
    gen_move_dest(val, a->rd, a->width);
    return true;
}
|
|
|
|
/*
 * LOAD with index-register + absolute-address addressing: effective
 * address = r12/r13 (selected by ri) + addr.
 */
static bool trans_LOAD_ind_abs(DisasContext *ctx, arg_LOAD_ind_abs *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    tcg_gen_addi_i32(temp, r[12 + a->ri], a->addr);
    tcg_gen_qemu_ld_i32(temp, temp, 0, unsigned_op_by_width[a->width]);
    gen_move_dest(temp, a->rd, a->width);
    return true;
}
|
|
|
|
/*
 * LOADM: load multiple 16-bit registers (r2..r5, then r8..r11) from the
 * address held in r0 (16-bit) or the (r1:r0) pair.
 * NOTE(review): the deposit/andi below rewrites r0's backing value to
 * form the full address in-place, permanently changing r0's stored high
 * bits — related to the "32 bit values ending up in the register
 * backing" TODO near gen_combine_rp; confirm intended.
 */
static bool trans_LOADM(DisasContext *ctx, arg_LOADM *a) {
    if (a->pair) {
        tcg_gen_deposit_i32(r[0], r[0], r[1], 16, 16);
    }
    else {
        tcg_gen_andi_i32(r[0], r[0], 0xFFFF);
    }

    for (int i = 0; i < a->cnt+1; i++) {
        /* Destinations are r2..r5 then r8..r11 (gap at r6/r7). The
         * destination register doubles as the address scratch. */
        TCGv_i32 dest_reg = r[(i+2 <= 5) ? (i+2) : (i+4)];
        tcg_gen_addi_i32(dest_reg, r[0], i*2);
        tcg_gen_qemu_ld_i32(dest_reg, dest_reg, 0, MO_UW);
    }
    return true;
}
|
|
|
|
// TODO this is at fault for 32 bit values ending up in the register backing values
|
|
/*
 * For a 4-byte access on a 16-bit register pair, merge the high half
 * (reg_id+1) into the upper 16 bits of reg_id so the pair can be used
 * as one 32-bit value. No-op for 1/2-byte widths or true 32-bit regs.
 * NOTE: this mutates reg_id's backing value (see TODO above).
 */
static void gen_combine_rp(int reg_id, int width) {
    if (width == 4 && reg_id < CR16C_FIRST_32B_REG) {
        tcg_gen_deposit_i32(r[reg_id], r[reg_id], r[reg_id+1], 16, 16);
    }
}
|
|
|
|
/*
 * STOR with register + displacement addressing.
 */
static bool trans_STOR(DisasContext *ctx, arg_STOR *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    gen_compute_addr_disp(temp, a->ra, a->disp, a->dbase);
    /* For 4-byte widths on a pair, fold rs+1 into rs's high half first. */
    gen_combine_rp(a->rs, a->width);
    tcg_gen_qemu_st_i32(r[a->rs], temp, 0, unsigned_op_by_width[a->width]);

    return true;
}
|
|
|
|
/*
 * STOR with register-relative-pair addressing.
 */
static bool trans_STOR_rrp(DisasContext *ctx, arg_STOR_rrp *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    gen_compute_rrp_addr(temp, a->rrp, a->disp);
    /* For 4-byte widths on a pair, fold rs+1 into rs's high half first. */
    gen_combine_rp(a->rs, a->width);
    tcg_gen_qemu_st_i32(r[a->rs], temp, 0, unsigned_op_by_width[a->width]);

    return true;
}
|
|
|
|
/*
 * STOR with absolute addressing. abs20 addresses above 0xEFFFF are
 * remapped into the top of the 24-bit space (addr | 0xF00000) when the
 * decoder flags it.
 */
static bool trans_STOR_abs(DisasContext *ctx, arg_STOR_abs *a) {
    int32_t addr = a->addr;

    // Table 5-7 describes the addressing options for the
    // first [single reg -> dest] and second [STORD] formats of the instruction.
    // See Table 5-7, footnote f, which applies for abs20

    if (a->remap) {
        addr = addr > 0xEFFFF ? addr | 0xF00000 : addr;
    }

    gen_combine_rp(a->rs, a->width);
    tcg_gen_qemu_st_i32(r[a->rs], tcg_constant_i32(addr), 0, unsigned_op_by_width[a->width]);
    return true;
}
|
|
|
|
/*
 * STOR with index-register + absolute-address addressing: effective
 * address = r12/r13 (selected by ri) + addr.
 */
static bool trans_STOR_ind_abs(DisasContext *ctx, arg_STOR_ind_abs *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    tcg_gen_addi_i32(temp, r[12 + a->ri], a->addr);
    gen_combine_rp(a->rs, a->width);
    tcg_gen_qemu_st_i32(r[a->rs], temp, 0, unsigned_op_by_width[a->width]);

    return true;
}
|
|
|
|
/*
 * STOR imm to an absolute address. Same abs20 high-range remap as
 * trans_STOR_abs, per the footnote cited below.
 */
static bool trans_STOR_abs_imm(DisasContext *ctx, arg_STOR_abs_imm *a) {
    int32_t addr = a->addr;

    // Table 5-4 describes the addressing options for the
    // third [imm -> dest] format of the instruction.
    // See Table 5-4, footnote e, which applies for abs20
    if (a->remap) {
        addr = addr > 0xEFFFF ? addr | 0xF00000 : addr;
    }

    tcg_gen_qemu_st_i32(tcg_constant_i32(a->imm), tcg_constant_i32(addr), 0, unsigned_op_by_width[a->width]);
    return true;
}
|
|
|
|
/* STOR imm with register + displacement addressing. */
static bool trans_STOR_imm(DisasContext *ctx, arg_STOR_imm *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_addr_disp(ea, a->ra, a->disp, a->dbase);
    tcg_gen_qemu_st_i32(tcg_constant_i32(a->imm), ea, 0, unsigned_op_by_width[a->width]);
    return true;
}
|
|
|
|
/* STOR imm with register-relative-pair addressing. */
static bool trans_STOR_rrp_imm(DisasContext *ctx, arg_STOR_rrp_imm *a) {
    TCGv_i32 ea = tcg_temp_new_i32();
    gen_compute_rrp_addr(ea, a->rrp, a->disp);
    tcg_gen_qemu_st_i32(tcg_constant_i32(a->imm), ea, 0, unsigned_op_by_width[a->width]);
    return true;
}
|
|
|
|
/*
 * STOR imm with index-register + absolute-address addressing:
 * effective address = r12/r13 (selected by ri) + addr.
 */
static bool trans_STOR_abs_rrp_imm(DisasContext *ctx, arg_STOR_abs_rrp_imm *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    tcg_gen_addi_i32(temp, r[12 + a->ri], a->addr);
    tcg_gen_qemu_st_i32(tcg_constant_i32(a->imm), temp, 0, unsigned_op_by_width[a->width]);

    return true;
}
|
|
|
|
|
|
/*
 * STORM: store multiple 16-bit registers (r2..r5, then r8..r11) to the
 * address held in r0 (16-bit) or the (r1:r0) pair.
 * NOTE(review): like LOADM, this rewrites r0's backing value in-place to
 * form the base address — confirm intended (see register-backing TODO).
 */
static bool trans_STORM(DisasContext *ctx, arg_STORM *a) {
    TCGv_i32 temp = tcg_temp_new_i32();

    if (a->pair) {
        tcg_gen_deposit_i32(r[0], r[0], r[1], 16, 16);
    }
    else {
        tcg_gen_andi_i32(r[0], r[0], 0xFFFF);
    }

    for (int i = 0; i < a->cnt+1; i++) {
        /* Sources are r2..r5 then r8..r11 (gap at r6/r7). */
        TCGv_i32 rs = r[(i+2 <= 5) ? (i+2) : (i+4)];
        tcg_gen_addi_i32(temp, r[0], i*2);
        tcg_gen_qemu_st_i32(rs, temp, 0, MO_UW);
    }
    return true;
}
|
|
|
|
/* LPR (load processor register): not implemented yet; raises at runtime. */
static bool trans_LPR(DisasContext *ctx, arg_LPR* a) {
    gen_helper_raise_unimplemented_instruction();
    return true;
}
|
|
|
|
/* LPRD (load processor register, double): not implemented yet; raises at runtime. */
static bool trans_LPRD(DisasContext *ctx, arg_LPRD* a) {
    gen_helper_raise_unimplemented_instruction();
    return true;
}
|
|
|
|
/* SPR (store processor register): not implemented yet; raises at runtime. */
static bool trans_SPR(DisasContext *ctx, arg_SPR* a) {
    gen_helper_raise_unimplemented_instruction();
    return true;
}
|
|
|
|
/* SPRD (store processor register, double): not implemented yet; raises at runtime. */
static bool trans_SPRD(DisasContext *ctx, arg_SPRD* a) {
    gen_helper_raise_unimplemented_instruction();
    return true;
}
|
|
|
|
/*
 * BAL (branch and link), ra-linkage form: save the return address in RA
 * and branch PC-relative by a sign-extended word displacement.
 *
 * FIX: removed the leftover "!!!bal" qemu_printf debug output, which was
 * also non-portable ("%04lx" with a 64-bit vaddr breaks on 32-bit hosts).
 */
static bool trans_BAL_ra(DisasContext *ctx, arg_BAL_ra *a) {
#define BAL_LEN 4
    // TODO BAL_rp would be 3 words/6 bytes (fmt 3a)
    /* Displacement is relative to this instruction's own address. */
    vaddr pc_this = ctx->base.pc_next - BAL_LEN;
    vaddr dest_offset = a->dest*2;

    // 1) store next PC in ra TODO BAL_rp can choose where
    tcg_gen_movi_i32(r[CR16C_REGNO_RA], ctx->base.pc_next);

    // 2) sign extend from 23 -> "25" bits
    int32_t dest_sextend = sextract32(dest_offset, 0, 23);

    ctx->base.is_jmp = DISAS_NORETURN;
    gen_goto(&ctx->base, pc_this + dest_sextend, 0);

    // TODO IAD trap
    // TODO CFG.SR handling
    return true;
}
|
|
|
|
/*
 * POP / POPRET: pop 1-8 16-bit registers from the stack, optionally pop
 * RA (4 bytes), and for POPRET (a->rt) jump to RA.
 *
 * FIX: the POPRET return previously used tcg_gen_goto_tb() with a
 * run-time destination (RA). goto_tb is only valid for compile-time
 * constant targets — once the TB is chained, the dynamic pc update is
 * skipped entirely. Use lookup_and_goto_ptr, matching trans_JCOND.
 */
static bool trans_pop(DisasContext *ctx, arg_pop *a) {
    // count is 3 bits, so its range is 0-7
    // however, it encodes counts 1-8
    int32_t count = a->count + 1;
    // todo if cfg.sr = 1
    if (true) {
        /* Register window must not run past r15. */
        if (a->count + a->dest > 15) {
            // invalid
            return false;
        }
    } else {
        // TODO cfg.sr = 0
    }
    for (int i = 0; i < count; ++i) {
        // pop regular registers
        // TODO register pair popping???
        tcg_gen_qemu_ld_tl(r[a->dest + i], r[CR16C_REGNO_SP], 0, MO_UW);
        tcg_gen_addi_tl(r[CR16C_REGNO_SP], r[CR16C_REGNO_SP], 2);
    }
    // TODO memory spaces(???)
    // pop RA if requested
    if (a->ra) {
        tcg_gen_qemu_ld_tl(r[CR16C_REGNO_RA], r[CR16C_REGNO_SP], 0, MO_32);
        tcg_gen_addi_tl(r[CR16C_REGNO_SP], r[CR16C_REGNO_SP], 4);
    }

    if (a->rt) {
        // basically, JUMP RA
        ctx->base.is_jmp = DISAS_NORETURN;

        /* Indirect jump: target known only at run time. */
        tcg_gen_mov_i32(pc, r[CR16C_REGNO_RA]);
        tcg_gen_lookup_and_goto_ptr();
    }
    return true;
}
|
|
|
|
/*
 * PUSH: push 1-8 16-bit registers and optionally RA (4 bytes) onto the
 * stack, in reverse order so a matching POP restores them (layout below).
 */
static bool trans_push(DisasContext *ctx, arg_push *a) {
    int32_t count = a->count + 1;

    // we need to push the registers in reverse order so that they are in the correct places for POP, e.g.:
    // SP = 0x100A
    // # popret $3, R1, RA
    // 0x1000 R1
    // 0x1002 R2
    // 0x1004 R3
    // 0x1006 RA # 4 bytes!!
    // 0x100A xxxxx

    // TODO memory spaces
    // push RA
    if (a->ra) {
        tcg_gen_subi_tl(r[CR16C_REGNO_SP], r[CR16C_REGNO_SP], 4);
        tcg_gen_qemu_st_tl(r[CR16C_REGNO_RA], r[CR16C_REGNO_SP], 0, MO_32);
    }

    // push regular registers
    for (int i = count - 1; i >= 0; --i) {
        // TODO register pairs
        tcg_gen_subi_tl(r[CR16C_REGNO_SP], r[CR16C_REGNO_SP], 2);
        tcg_gen_qemu_st_tl(r[a->src + i], r[CR16C_REGNO_SP], 0, MO_16);
    }

    return true;
}
|
|
|
|
/* Some instructions aren't implemented yet, eg. because of some binutils that make them hard to verify and we'll fix first */
|
|
/*
 * Catch-all for instructions that decode but are not implemented yet:
 * raise at runtime and terminate the TB.
 */
static bool trans_UNIMPLEMENTED(DisasContext *ctx, arg_UNIMPLEMENTED *a) {
    gen_helper_raise_unimplemented_instruction();
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
|
|
|
|
|
|
/*** Translation hooks ****/
|
|
|
|
/*
 * Translator hook: fetch and decode one instruction. On decode failure,
 * report it, raise an illegal-instruction exception, and advance past
 * the 2-byte opcode.
 *
 * FIX: "%04lx" for a uint64_t is wrong on 32-bit hosts — use PRIx64.
 * Also dropped a stray semicolon after the if-block.
 */
static void cr16c_tr_translate_insn(DisasContextBase *base, CPUState *cs) {
    DisasContext *ctx = container_of(base, DisasContext, base);

    uint64_t insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        // TOOD: Illegal Instruction
        error_report("cr16c_tr_translate_insn, illegal instr, insn: 0x%04" PRIx64 " @ 0x%04x\n", insn, ctx->env->pc);
        gen_helper_raise_illegal_instruction(tcg_env);
        base->is_jmp = DISAS_NORETURN;
        base->pc_next += 2;
    }
}
|
|
|
|
/*
 * Translator hook: finish the TB. DISAS_TOO_MANY means translation
 * stopped for a non-control-flow reason, so emit a goto to the next PC;
 * DISAS_NORETURN means the last insn already ended the TB.
 */
static void cr16c_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) {
    DisasContext* ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto(dcbase, dcbase->pc_next, 0);
        /* Fall through */
    case DISAS_NORETURN:
        return;
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
/* Hook table handed to the generic translator loop. */
static const TranslatorOps cr16c_tr_ops = {
    .init_disas_context = cr16c_tr_init_disas_context,
    .tb_start = cr16c_tr_tb_start,
    .insn_start = cr16c_tr_insn_start,
    .translate_insn = cr16c_tr_translate_insn,
    .tb_stop = cr16c_tr_tb_stop,
};
|
|
|
|
/*
 * One-time TCG setup: create global TCGv handles backed by CPUCR16CState
 * fields for the general registers, PC, the individual PSR flag fields,
 * and the CFG bits used during translation.
 */
void cr16c_translate_init(void) {
    for(int i = 0; i < CR16C_REG_COUNT; i++) {
        r[i] = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, r[i]), cr16c_cpu_r_names[i]);
    }
    pc = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, pc), "pc");
    /* PSR flags are kept as separate 32-bit fields; only bit 0 is
     * significant (higher bits may be garbage until masked). */
    psr_n = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_n), "psr_n");
    psr_z = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_z), "psr_z");
    psr_f = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_f), "psr_f");
    psr_l = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_l), "psr_l");
    psr_c = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_c), "psr_c");
    psr_t = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_t), "psr_t");
    psr_u = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_u), "psr_u");
    psr_e = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_e), "psr_e");
    psr_p = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_p), "psr_p");
    psr_i = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, psr_i), "psr_i");

    cfg_dc = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, cfg_dc), "cfg_dc");
    cfg_ldc = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, cfg_ldc), "cfg_ldc");
    cfg_ic = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, cfg_ic), "cfg_ic");
    cfg_lic = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, cfg_lic), "cfg_lic");
    cfg_ed = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, cfg_ed), "cfg_ed");
    cfg_sr = tcg_global_mem_new_i32(tcg_env, offsetof(CPUCR16CState, cfg_sr), "cfg_sr");
}
|
|
|
|
/*
 * Entry point called by the accel layer to translate one TB: delegates
 * to the generic translator loop with this target's hook table.
 */
void cr16c_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &cr16c_tr_ops, &ctx.base);
}
|