Diffstat (limited to 'qemu/target-alpha/translate.c')
-rw-r--r--   qemu/target-alpha/translate.c   297
1 file changed, 170 insertions(+), 127 deletions(-)
diff --git a/qemu/target-alpha/translate.c b/qemu/target-alpha/translate.c
index 81d4ff827..5b86992dd 100644
--- a/qemu/target-alpha/translate.c
+++ b/qemu/target-alpha/translate.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
@@ -27,6 +28,7 @@
#include "exec/helper-gen.h"
#include "trace-tcg.h"
+#include "exec/log.h"
#undef ALPHA_DEBUG_DISAS
@@ -42,6 +44,9 @@ typedef struct DisasContext DisasContext;
struct DisasContext {
struct TranslationBlock *tb;
uint64_t pc;
+#ifndef CONFIG_USER_ONLY
+ uint64_t palbr;
+#endif
int mem_idx;
/* Current rounding mode for this TB. */
@@ -52,6 +57,9 @@ struct DisasContext {
/* implver value for this CPU. */
int implver;
+ /* The set of registers active in the current context. */
+ TCGv *ir;
+
/* Temporaries for $31 and $f31 as source and destination. */
TCGv zero;
TCGv sink;
@@ -85,14 +93,18 @@ typedef enum {
} ExitStatus;
/* global register indexes */
-static TCGv_ptr cpu_env;
-static TCGv cpu_ir[31];
+static TCGv_env cpu_env;
+static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_pal_ir[31];
+#endif
+
#include "exec/gen-icount.h"
void alpha_translate_init(void)
@@ -122,6 +134,12 @@ void alpha_translate_init(void)
"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
"f24", "f25", "f26", "f27", "f28", "f29", "f30"
};
+#ifndef CONFIG_USER_ONLY
+ static const char shadow_names[8][8] = {
+ "pal_t7", "pal_s0", "pal_s1", "pal_s2",
+ "pal_s3", "pal_s4", "pal_s5", "pal_t11"
+ };
+#endif
static bool done_init = 0;
int i;
@@ -134,20 +152,31 @@ void alpha_translate_init(void)
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
for (i = 0; i < 31; i++) {
- cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, ir[i]),
- greg_names[i]);
+ cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState, ir[i]),
+ greg_names[i]);
}
for (i = 0; i < 31; i++) {
- cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUAlphaState, fir[i]),
freg_names[i]);
}
+#ifndef CONFIG_USER_ONLY
+ memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
+ for (i = 0; i < 8; i++) {
+ int r = (i == 7 ? 25 : i + 8);
+ cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState,
+ shadow[i]),
+ shadow_names[i]);
+ }
+#endif
+
for (i = 0; i < ARRAY_SIZE(vars); ++i) {
const GlobalVar *v = &vars[i];
- *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
+ *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
}
}
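
For context on the hunk above: when a TB is translated in PAL mode the translator swaps IR slots $r8-$r14 and $r25 for the eight PALcode shadow registers, while every other slot keeps the standard bank. A minimal standalone sketch of just that index mapping (shadow_slot_to_ir and the printout are illustrative helpers, not part of the patch):

    #include <stdio.h>

    /* Illustrative only: mirrors the mapping used in alpha_translate_init()
     * above, where shadow[0..6] overlay $r8..$r14 and shadow[7] overlays $r25. */
    static int shadow_slot_to_ir(int slot)
    {
        return slot == 7 ? 25 : slot + 8;
    }

    int main(void)
    {
        static const char *const names[8] = {
            "pal_t7", "pal_s0", "pal_s1", "pal_s2",
            "pal_s3", "pal_s4", "pal_s5", "pal_t11"
        };
        for (int i = 0; i < 8; i++) {
            printf("shadow[%d] (%s) -> $r%d\n", i, names[i], shadow_slot_to_ir(i));
        }
        return 0;
    }
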
@@ -170,7 +199,7 @@ static TCGv dest_sink(DisasContext *ctx)
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
if (likely(reg < 31)) {
- return cpu_ir[reg];
+ return ctx->ir[reg];
} else {
return load_zero(ctx);
}
@@ -183,7 +212,7 @@ static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
ctx->lit = tcg_const_i64(lit);
return ctx->lit;
} else if (likely(reg < 31)) {
- return cpu_ir[reg];
+ return ctx->ir[reg];
} else {
return load_zero(ctx);
}
@@ -192,7 +221,7 @@ static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
if (likely(reg < 31)) {
- return cpu_ir[reg];
+ return ctx->ir[reg];
} else {
return dest_sink(ctx);
}
@@ -304,7 +333,7 @@ static inline void gen_load_mem(DisasContext *ctx,
addr = tmp;
}
- va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
+ va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
tcg_gen_qemu_load(va, addr, ctx->mem_idx);
tcg_temp_free(tmp);
@@ -399,13 +428,13 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
- tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
+ tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
quad ? MO_LEQ : MO_LEUL);
- tcg_gen_movi_i64(cpu_ir[ra], 1);
+ tcg_gen_movi_i64(ctx->ir[ra], 1);
tcg_gen_br(lab_done);
gen_set_label(lab_fail);
- tcg_gen_movi_i64(cpu_ir[ra], 0);
+ tcg_gen_movi_i64(ctx->ir[ra], 0);
gen_set_label(lab_done);
tcg_gen_movi_i64(cpu_lock_addr, -1);
@@ -444,7 +473,7 @@ static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
uint64_t dest = ctx->pc + (disp << 2);
if (ra != 31) {
- tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
+ tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
}
/* Notice branch-to-next; used to initialize RA with the PC. */
@@ -1059,12 +1088,13 @@ static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
}
}
-static void gen_rx(int ra, int set)
+static void gen_rx(DisasContext *ctx, int ra, int set)
{
TCGv_i32 tmp;
if (ra != 31) {
- tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
+ tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
+ offsetof(CPUAlphaState, intr_flag));
}
tmp = tcg_const_i32(set);
@@ -1086,12 +1116,12 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x9E:
/* RDUNIQUE */
- tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, unique));
break;
case 0x9F:
/* WRUNIQUE */
- tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, unique));
break;
default:
@@ -1115,17 +1145,17 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x2D:
/* WRVPTPTR */
- tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, vptptr));
break;
case 0x31:
/* WRVAL */
- tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, sysval));
break;
case 0x32:
/* RDVAL */
- tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, sysval));
break;
@@ -1135,12 +1165,12 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
/* Note that we already know we're in kernel mode, so we know
that PS only contains the 3 IPL bits. */
- tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
+ tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, ps));
/* But make sure and store only the 3 IPL bits from the user. */
tmp = tcg_temp_new();
- tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
+ tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
tcg_temp_free(tmp);
break;
@@ -1148,22 +1178,22 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
case 0x36:
/* RDPS */
- tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
+ tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, ps));
break;
case 0x38:
/* WRUSP */
- tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
offsetof(CPUAlphaState, usp));
break;
case 0x3A:
/* RDUSP */
- tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
offsetof(CPUAlphaState, usp));
break;
case 0x3C:
/* WHAMI */
- tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
+ tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
@@ -1181,15 +1211,24 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
{
- TCGv pc = tcg_const_i64(ctx->pc);
- TCGv entry = tcg_const_i64(palcode & 0x80
- ? 0x2000 + (palcode - 0x80) * 64
- : 0x1000 + palcode * 64);
+ TCGv tmp = tcg_temp_new();
+ uint64_t exc_addr = ctx->pc;
+ uint64_t entry = ctx->palbr;
- gen_helper_call_pal(cpu_env, pc, entry);
+ if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
+ exc_addr |= 1;
+ } else {
+ tcg_gen_movi_i64(tmp, 1);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
+ }
+
+ tcg_gen_movi_i64(tmp, exc_addr);
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ tcg_temp_free(tmp);
- tcg_temp_free(entry);
- tcg_temp_free(pc);
+ entry += (palcode & 0x80
+ ? 0x2000 + (palcode - 0x80) * 64
+ : 0x1000 + palcode * 64);
/* Since the destination is running in PALmode, we don't really
need the page permissions check. We'll see the existence of
@@ -1197,11 +1236,13 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
we change the PAL base register. */
if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, entry);
tcg_gen_exit_tb((uintptr_t)ctx->tb);
return EXIT_GOTO_TB;
+ } else {
+ tcg_gen_movi_i64(cpu_pc, entry);
+ return EXIT_PC_UPDATED;
}
-
- return EXIT_PC_UPDATED;
}
#endif
}
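
The CALL_PAL lowering above now computes the PALcode entry point inline from the PAL base register rather than passing it to a helper. A hedged sketch of just that arithmetic (pal_entry and the sample PALBR value are illustrative, not part of the patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: reproduces the entry-point arithmetic from
     * gen_call_pal() above.  Privileged codes (bit 7 clear) vector to
     * PALBR + 0x1000 + 64*code; unprivileged codes (bit 7 set) to
     * PALBR + 0x2000 + 64*(code - 0x80). */
    static uint64_t pal_entry(uint64_t palbr, int palcode)
    {
        return palbr + (palcode & 0x80
                        ? 0x2000 + (palcode - 0x80) * 64
                        : 0x1000 + palcode * 64);
    }

    int main(void)
    {
        uint64_t palbr = 0x4000;   /* assumed PALBR, for illustration only */
        printf("CALL_PAL 0x83 (callsys) -> 0x%" PRIx64 "\n", pal_entry(palbr, 0x83));
        printf("CALL_PAL 0x02 (draina)  -> 0x%" PRIx64 "\n", pal_entry(palbr, 0x02));
        return 0;
    }
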
@@ -1228,8 +1269,6 @@ static int cpu_pr_data(int pr)
case 11: return offsetof(CPUAlphaState, sysval);
case 12: return offsetof(CPUAlphaState, usp);
- case 32 ... 39:
- return offsetof(CPUAlphaState, shadow[pr - 32]);
case 40 ... 63:
return offsetof(CPUAlphaState, scratch[pr - 40]);
@@ -1241,36 +1280,48 @@ static int cpu_pr_data(int pr)
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
- int data = cpu_pr_data(regno);
-
- /* Special help for VMTIME and WALLTIME. */
- if (regno == 250 || regno == 249) {
- void (*helper)(TCGv) = gen_helper_get_walltime;
- if (regno == 249) {
- helper = gen_helper_get_vmtime;
- }
- if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ void (*helper)(TCGv);
+ int data;
+
+ switch (regno) {
+ case 32 ... 39:
+ /* Accessing the "non-shadow" general registers. */
+ regno = regno == 39 ? 25 : regno - 32 + 8;
+ tcg_gen_mov_i64(va, cpu_std_ir[regno]);
+ break;
+
+ case 250: /* WALLTIME */
+ helper = gen_helper_get_walltime;
+ goto do_helper;
+ case 249: /* VMTIME */
+ helper = gen_helper_get_vmtime;
+ do_helper:
+ if (use_icount) {
gen_io_start();
helper(va);
gen_io_end();
return EXIT_PC_STALE;
} else {
helper(va);
- return NO_EXIT;
}
- }
+ break;
- /* The basic registers are data only, and unknown registers
- are read-zero, write-ignore. */
- if (data == 0) {
- tcg_gen_movi_i64(va, 0);
- } else if (data & PR_BYTE) {
- tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
- } else if (data & PR_LONG) {
- tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
- } else {
- tcg_gen_ld_i64(va, cpu_env, data);
+ default:
+ /* The basic registers are data only, and unknown registers
+ are read-zero, write-ignore. */
+ data = cpu_pr_data(regno);
+ if (data == 0) {
+ tcg_gen_movi_i64(va, 0);
+ } else if (data & PR_BYTE) {
+ tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
+ } else if (data & PR_LONG) {
+ tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
+ } else {
+ tcg_gen_ld_i64(va, cpu_env, data);
+ }
+ break;
}
+
return NO_EXIT;
}
@@ -1316,6 +1367,12 @@ static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
gen_helper_tb_flush(cpu_env);
return EXIT_PC_STALE;
+ case 32 ... 39:
+ /* Accessing the "non-shadow" general registers. */
+ regno = regno == 39 ? 25 : regno - 32 + 8;
+ tcg_gen_mov_i64(cpu_std_ir[regno], vb);
+ break;
+
default:
/* The basic registers are data only, and unknown registers
are read-zero, write-ignore. */
@@ -1507,7 +1564,12 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x0F:
/* CMPBGE */
- gen_helper_cmpbge(vc, va, vb);
+ if (ra == 31) {
+ /* Special case 0 >= X as X == 0. */
+ gen_helper_cmpbe0(vc, vb);
+ } else {
+ gen_helper_cmpbge(vc, va, vb);
+ }
break;
case 0x12:
/* S8ADDL */
@@ -1952,7 +2014,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
REQUIRE_REG_31(rb);
t32 = tcg_temp_new_i32();
va = load_gpr(ctx, ra);
- tcg_gen_trunc_i64_i32(t32, va);
+ tcg_gen_extrl_i64_i32(t32, va);
gen_helper_memory_to_s(vc, t32);
tcg_temp_free_i32(t32);
break;
@@ -1972,7 +2034,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
REQUIRE_REG_31(rb);
t32 = tcg_temp_new_i32();
va = load_gpr(ctx, ra);
- tcg_gen_trunc_i64_i32(t32, va);
+ tcg_gen_extrl_i64_i32(t32, va);
gen_helper_memory_to_f(vc, t32);
tcg_temp_free_i32(t32);
break;
@@ -2295,14 +2357,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0xE000:
/* RC */
- gen_rx(ra, 0);
+ gen_rx(ctx, ra, 0);
break;
case 0xE800:
/* ECB */
break;
case 0xF000:
/* RS */
- gen_rx(ra, 1);
+ gen_rx(ctx, ra, 1);
break;
case 0xF800:
/* WH64 */
@@ -2334,7 +2396,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tcg_gen_andi_i64(cpu_pc, vb, ~3);
if (ra != 31) {
- tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
+ tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
}
ret = EXIT_PC_UPDATED;
break;
@@ -2374,10 +2436,10 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
break;
case 0x6:
- /* Incpu_ir[ra]id */
+ /* Invalid */
goto invalid_opc;
case 0x7:
- /* Incpu_ir[ra]id */
+ /* Invalid */
goto invalid_opc;
case 0x8:
/* Longword virtual access (hw_ldl) */
@@ -2580,13 +2642,18 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
/* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
address from EXC_ADDR. This turns out to be useful for our
emulation PALcode, so continue to accept it. */
- tmp = tcg_temp_new();
- tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
- gen_helper_hw_ret(cpu_env, tmp);
- tcg_temp_free(tmp);
+ ctx->lit = vb = tcg_temp_new();
+ tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
} else {
- gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
+ vb = load_gpr(ctx, rb);
}
+ tmp = tcg_temp_new();
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
+ tcg_gen_movi_i64(cpu_lock_addr, -1);
+ tcg_gen_andi_i64(tmp, vb, 1);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
+ tcg_gen_andi_i64(cpu_pc, vb, ~3);
ret = EXIT_PC_UPDATED;
break;
#else
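
Where the old code deferred to gen_helper_hw_ret(), the hunk above open-codes the return-from-PALcode side effects. A hedged C sketch of the net effect on CPU state (fake_alpha_state and hw_ret_effect are stand-ins, not QEMU types):

    #include <stdint.h>

    struct fake_alpha_state {
        uint8_t  intr_flag;
        uint8_t  pal_mode;
        uint64_t lock_addr;
        uint64_t pc;
    };

    /* Illustrative only: plain-C rendering of the TCG ops emitted above. */
    static void hw_ret_effect(struct fake_alpha_state *env, uint64_t vb)
    {
        env->intr_flag = 0;             /* clear the per-CPU interrupt flag */
        env->lock_addr = (uint64_t)-1;  /* abandon any LL/SC reservation */
        env->pal_mode = vb & 1;         /* low bit selects PAL mode */
        env->pc = vb & ~(uint64_t)3;    /* remaining bits are the return address */
    }

    int main(void)
    {
        struct fake_alpha_state env = {0};
        hw_ret_effect(&env, 0x8001);    /* assumed return target, low bit set */
        return env.pal_mode;            /* 1: returning into PAL mode */
    }
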
@@ -2793,18 +2860,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
return ret;
}
-static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
- TranslationBlock *tb,
- bool search_pc)
+void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- CPUAlphaState *env = &cpu->env;
DisasContext ctx, *ctxp = &ctx;
target_ulong pc_start;
target_ulong pc_mask;
uint32_t insn;
- CPUBreakpoint *bp;
- int j, lj = -1;
ExitStatus ret;
int num_insns;
int max_insns;
@@ -2813,10 +2876,17 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
ctx.tb = tb;
ctx.pc = pc_start;
- ctx.mem_idx = cpu_mmu_index(env);
+ ctx.mem_idx = cpu_mmu_index(env, false);
ctx.implver = env->implver;
ctx.singlestep_enabled = cs->singlestep_enabled;
+#ifdef CONFIG_USER_ONLY
+ ctx.ir = cpu_std_ir;
+#else
+ ctx.palbr = env->palbr;
+ ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
+#endif
+
/* ??? Every TB begins with unset rounding mode, to be initialized on
the first fp insn of the TB. Alternately we could define a proper
default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
@@ -2832,6 +2902,9 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
if (in_superpage(&ctx, pc_start)) {
pc_mask = (1ULL << 41) - 1;
@@ -2841,35 +2914,22 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
gen_tb_start(tb);
do {
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == ctx.pc) {
- gen_excp(&ctx, EXCP_DEBUG, 0);
- break;
- }
- }
- }
- if (search_pc) {
- j = tcg_op_buf_count();
- if (lj < j) {
- lj++;
- while (lj < j) {
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
- }
- tcg_ctx.gen_opc_pc[lj] = ctx.pc;
- tcg_ctx.gen_opc_instr_start[lj] = 1;
- tcg_ctx.gen_opc_icount[lj] = num_insns;
+ tcg_gen_insn_start(ctx.pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
+ ret = gen_excp(&ctx, EXCP_DEBUG, 0);
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx.pc += 4;
+ break;
}
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
insn = cpu_ldl_code(env, ctx.pc);
- num_insns++;
-
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(ctx.pc);
- }
TCGV_UNUSED_I64(ctx.zero);
TCGV_UNUSED_I64(ctx.sink);
@@ -2925,16 +2985,8 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
gen_tb_end(tb, num_insns);
- if (search_pc) {
- j = tcg_op_buf_count();
- lj++;
- while (lj <= j) {
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
- } else {
- tb->size = ctx.pc - pc_start;
- tb->icount = num_insns;
- }
+ tb->size = ctx.pc - pc_start;
+ tb->icount = num_insns;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
@@ -2945,17 +2997,8 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
#endif
}
-void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
-{
- gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
-}
-
-void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
-{
- gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
-}
-
-void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
+ target_ulong *data)
{
- env->pc = tcg_ctx.gen_opc_pc[pc_pos];
+ env->pc = data[0];
}
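
With the final hunk, the per-instruction unwind data recorded by tcg_gen_insn_start(ctx.pc) is just the guest PC, so restore_state_to_opc() only needs data[0]. A minimal sketch of that contract (fake_env and restore_pc are illustrative names, not QEMU's):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t fake_target_ulong;        /* stand-in for target_ulong */

    struct fake_env { fake_target_ulong pc; };

    /* Illustrative only: one insn_start word per instruction (the PC),
     * copied straight back into the CPU state when unwinding a faulting TB. */
    static void restore_pc(struct fake_env *env, const fake_target_ulong *data)
    {
        env->pc = data[0];
    }

    int main(void)
    {
        struct fake_env env = { 0 };
        fake_target_ulong data[1] = { 0x120001000ULL };  /* assumed fault PC */
        restore_pc(&env, data);
        printf("restored pc = 0x%llx\n", (unsigned long long)env.pc);
        return 0;
    }
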