From 437fd90c0250dee670290f9b714253671a990160 Mon Sep 17 00:00:00 2001
From: José Pekkarinen 
Date: Wed, 18 May 2016 13:18:31 +0300
Subject: These changes are the raw update to qemu-2.6.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Collisions happened in the following patches:

migration: do cleanup operation after completion (738df5b9)

Bug fix. (1750c932f86)

kvmclock: add a new function to update env->tsc. (b52baab2)

The code provided by the patches was already present in the upstream
version.

Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a
Signed-off-by: José Pekkarinen 
---
 qemu/target-tilegx/Makefile.objs   |    1 +
 qemu/target-tilegx/cpu.c           |  187 +++
 qemu/target-tilegx/cpu.h           |  181 +++
 qemu/target-tilegx/helper.c        |  162 +++
 qemu/target-tilegx/helper.h        |   26 +
 qemu/target-tilegx/opcode_tilegx.h | 1406 +++++++++++++++++++++
 qemu/target-tilegx/simd_helper.c   |  166 +++
 qemu/target-tilegx/spr_def_64.h    |  216 ++++
 qemu/target-tilegx/translate.c     | 2451 ++++++++++++++++++++++++++++++++++++
 9 files changed, 4796 insertions(+)
 create mode 100644 qemu/target-tilegx/Makefile.objs
 create mode 100644 qemu/target-tilegx/cpu.c
 create mode 100644 qemu/target-tilegx/cpu.h
 create mode 100644 qemu/target-tilegx/helper.c
 create mode 100644 qemu/target-tilegx/helper.h
 create mode 100644 qemu/target-tilegx/opcode_tilegx.h
 create mode 100644 qemu/target-tilegx/simd_helper.c
 create mode 100644 qemu/target-tilegx/spr_def_64.h
 create mode 100644 qemu/target-tilegx/translate.c

diff --git a/qemu/target-tilegx/Makefile.objs b/qemu/target-tilegx/Makefile.objs
new file mode 100644
index 000000000..0db778f40
--- /dev/null
+++ b/qemu/target-tilegx/Makefile.objs
@@ -0,0 +1 @@
+obj-y += cpu.o translate.o helper.o simd_helper.o
diff --git a/qemu/target-tilegx/cpu.c b/qemu/target-tilegx/cpu.c
new file mode 100644
index 000000000..d2d091203
--- /dev/null
+++ b/qemu/target-tilegx/cpu.c
@@ -0,0 +1,187 @@
+/*
+ * QEMU TILE-Gx CPU
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "qemu-common.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "linux-user/syscall_defs.h" + +static void tilegx_cpu_dump_state(CPUState *cs, FILE *f, + fprintf_function cpu_fprintf, int flags) +{ + static const char * const reg_names[TILEGX_R_COUNT] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", + "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", + "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", + "r48", "r49", "r50", "r51", "bp", "tp", "sp", "lr" + }; + + TileGXCPU *cpu = TILEGX_CPU(cs); + CPUTLGState *env = &cpu->env; + int i; + + for (i = 0; i < TILEGX_R_COUNT; i++) { + cpu_fprintf(f, "%-4s" TARGET_FMT_lx "%s", + reg_names[i], env->regs[i], + (i % 4) == 3 ? "\n" : " "); + } + cpu_fprintf(f, "PC " TARGET_FMT_lx " CEX " TARGET_FMT_lx "\n\n", + env->pc, env->spregs[TILEGX_SPR_CMPEXCH]); +} + +TileGXCPU *cpu_tilegx_init(const char *cpu_model) +{ + TileGXCPU *cpu; + + cpu = TILEGX_CPU(object_new(TYPE_TILEGX_CPU)); + + object_property_set_bool(OBJECT(cpu), true, "realized", NULL); + + return cpu; +} + +static void tilegx_cpu_set_pc(CPUState *cs, vaddr value) +{ + TileGXCPU *cpu = TILEGX_CPU(cs); + + cpu->env.pc = value; +} + +static bool tilegx_cpu_has_work(CPUState *cs) +{ + return true; +} + +static void tilegx_cpu_reset(CPUState *s) +{ + TileGXCPU *cpu = TILEGX_CPU(s); + TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(cpu); + CPUTLGState *env = &cpu->env; + + tcc->parent_reset(s); + + memset(env, 0, sizeof(CPUTLGState)); + tlb_flush(s, 1); +} + +static void tilegx_cpu_realizefn(DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(dev); + + cpu_reset(cs); + qemu_init_vcpu(cs); + + tcc->parent_realize(dev, errp); +} + +static void tilegx_cpu_initfn(Object *obj) +{ + CPUState *cs = CPU(obj); + TileGXCPU *cpu = TILEGX_CPU(obj); + CPUTLGState *env = &cpu->env; + static bool tcg_initialized; + + cs->env_ptr = env; + cpu_exec_init(cs, &error_abort); + + if (tcg_enabled() && !tcg_initialized) { + tcg_initialized = true; + tilegx_tcg_init(); + } +} + +static void tilegx_cpu_do_interrupt(CPUState *cs) +{ + cs->exception_index = -1; +} + +static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + TileGXCPU *cpu = TILEGX_CPU(cs); + + /* The sigcode field will be filled in by do_signal in main.c. 
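+     * This target is user-mode only, so there is no soft TLB to
+     * refill: every fault is reported as TILEGX_EXCP_SIGNAL with
+     * signo = TARGET_SIGSEGV, and the handler always returns 1.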
*/ + cs->exception_index = TILEGX_EXCP_SIGNAL; + cpu->env.excaddr = address; + cpu->env.signo = TARGET_SIGSEGV; + cpu->env.sigcode = 0; + + return 1; +} + +static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + tilegx_cpu_do_interrupt(cs); + return true; + } + return false; +} + +static void tilegx_cpu_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + TileGXCPUClass *tcc = TILEGX_CPU_CLASS(oc); + + tcc->parent_realize = dc->realize; + dc->realize = tilegx_cpu_realizefn; + + tcc->parent_reset = cc->reset; + cc->reset = tilegx_cpu_reset; + + cc->has_work = tilegx_cpu_has_work; + cc->do_interrupt = tilegx_cpu_do_interrupt; + cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt; + cc->dump_state = tilegx_cpu_dump_state; + cc->set_pc = tilegx_cpu_set_pc; + cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault; + cc->gdb_num_core_regs = 0; + + /* + * Reason: tilegx_cpu_initfn() calls cpu_exec_init(), which saves + * the object in cpus -> dangling pointer after final + * object_unref(). + */ + dc->cannot_destroy_with_object_finalize_yet = true; +} + +static const TypeInfo tilegx_cpu_type_info = { + .name = TYPE_TILEGX_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(TileGXCPU), + .instance_init = tilegx_cpu_initfn, + .class_size = sizeof(TileGXCPUClass), + .class_init = tilegx_cpu_class_init, +}; + +static void tilegx_cpu_register_types(void) +{ + type_register_static(&tilegx_cpu_type_info); +} + +type_init(tilegx_cpu_register_types) diff --git a/qemu/target-tilegx/cpu.h b/qemu/target-tilegx/cpu.h new file mode 100644 index 000000000..022cad186 --- /dev/null +++ b/qemu/target-tilegx/cpu.h @@ -0,0 +1,181 @@ +/* + * TILE-Gx virtual CPU header + * + * Copyright (c) 2015 Chen Gang + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
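+ * <http://www.gnu.org/licenses/>.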
+ */ +#ifndef CPU_TILEGX_H +#define CPU_TILEGX_H + +#include "qemu-common.h" + +#define TARGET_LONG_BITS 64 + +#define CPUArchState struct CPUTLGState + +#include "exec/cpu-defs.h" + + +/* TILE-Gx common register alias */ +#define TILEGX_R_RE 0 /* 0 register, for function/syscall return value */ +#define TILEGX_R_ERR 1 /* 1 register, for syscall errno flag */ +#define TILEGX_R_NR 10 /* 10 register, for syscall number */ +#define TILEGX_R_BP 52 /* 52 register, optional frame pointer */ +#define TILEGX_R_TP 53 /* TP register, thread local storage data */ +#define TILEGX_R_SP 54 /* SP register, stack pointer */ +#define TILEGX_R_LR 55 /* LR register, may save pc, but it is not pc */ +#define TILEGX_R_COUNT 56 /* Only 56 registers are really useful */ +#define TILEGX_R_SN 56 /* SN register, obsoleted, it likes zero register */ +#define TILEGX_R_IDN0 57 /* IDN0 register, cause IDN_ACCESS exception */ +#define TILEGX_R_IDN1 58 /* IDN1 register, cause IDN_ACCESS exception */ +#define TILEGX_R_UDN0 59 /* UDN0 register, cause UDN_ACCESS exception */ +#define TILEGX_R_UDN1 60 /* UDN1 register, cause UDN_ACCESS exception */ +#define TILEGX_R_UDN2 61 /* UDN2 register, cause UDN_ACCESS exception */ +#define TILEGX_R_UDN3 62 /* UDN3 register, cause UDN_ACCESS exception */ +#define TILEGX_R_ZERO 63 /* Zero register, always zero */ +#define TILEGX_R_NOREG 255 /* Invalid register value */ + +/* TILE-Gx special registers used by outside */ +enum { + TILEGX_SPR_CMPEXCH = 0, + TILEGX_SPR_CRITICAL_SEC = 1, + TILEGX_SPR_SIM_CONTROL = 2, + TILEGX_SPR_EX_CONTEXT_0_0 = 3, + TILEGX_SPR_EX_CONTEXT_0_1 = 4, + TILEGX_SPR_COUNT +}; + +/* Exception numbers */ +typedef enum { + TILEGX_EXCP_NONE = 0, + TILEGX_EXCP_SYSCALL = 1, + TILEGX_EXCP_SIGNAL = 2, + TILEGX_EXCP_OPCODE_UNKNOWN = 0x101, + TILEGX_EXCP_OPCODE_UNIMPLEMENTED = 0x102, + TILEGX_EXCP_OPCODE_CMPEXCH = 0x103, + TILEGX_EXCP_OPCODE_CMPEXCH4 = 0x104, + TILEGX_EXCP_OPCODE_EXCH = 0x105, + TILEGX_EXCP_OPCODE_EXCH4 = 0x106, + TILEGX_EXCP_OPCODE_FETCHADD = 0x107, + TILEGX_EXCP_OPCODE_FETCHADD4 = 0x108, + TILEGX_EXCP_OPCODE_FETCHADDGEZ = 0x109, + TILEGX_EXCP_OPCODE_FETCHADDGEZ4 = 0x10a, + TILEGX_EXCP_OPCODE_FETCHAND = 0x10b, + TILEGX_EXCP_OPCODE_FETCHAND4 = 0x10c, + TILEGX_EXCP_OPCODE_FETCHOR = 0x10d, + TILEGX_EXCP_OPCODE_FETCHOR4 = 0x10e, + TILEGX_EXCP_REG_IDN_ACCESS = 0x181, + TILEGX_EXCP_REG_UDN_ACCESS = 0x182, + TILEGX_EXCP_UNALIGNMENT = 0x201, + TILEGX_EXCP_DBUG_BREAK = 0x301 +} TileExcp; + +typedef struct CPUTLGState { + uint64_t regs[TILEGX_R_COUNT]; /* Common used registers by outside */ + uint64_t spregs[TILEGX_SPR_COUNT]; /* Special used registers by outside */ + uint64_t pc; /* Current pc */ + +#if defined(CONFIG_USER_ONLY) + uint64_t excaddr; /* exception address */ + uint64_t atomic_srca; /* Arguments to atomic "exceptions" */ + uint64_t atomic_srcb; + uint32_t atomic_dstr; + uint32_t signo; /* Signal number */ + uint32_t sigcode; /* Signal code */ +#endif + + CPU_COMMON +} CPUTLGState; + +#include "qom/cpu.h" + +#define TYPE_TILEGX_CPU "tilegx-cpu" + +#define TILEGX_CPU_CLASS(klass) \ + OBJECT_CLASS_CHECK(TileGXCPUClass, (klass), TYPE_TILEGX_CPU) +#define TILEGX_CPU(obj) \ + OBJECT_CHECK(TileGXCPU, (obj), TYPE_TILEGX_CPU) +#define TILEGX_CPU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(TileGXCPUClass, (obj), TYPE_TILEGX_CPU) + +/** + * TileGXCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A Tile-Gx CPU model. 
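+ * (QOM convention: the original realize/reset handlers are saved here
+ * so the subclass implementations can chain up to the parent class.)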
+ */ +typedef struct TileGXCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} TileGXCPUClass; + +/** + * TileGXCPU: + * @env: #CPUTLGState + * + * A Tile-GX CPU. + */ +typedef struct TileGXCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUTLGState env; +} TileGXCPU; + +static inline TileGXCPU *tilegx_env_get_cpu(CPUTLGState *env) +{ + return container_of(env, TileGXCPU, env); +} + +#define ENV_GET_CPU(e) CPU(tilegx_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(TileGXCPU, env) + +/* TILE-Gx memory attributes */ +#define TARGET_PAGE_BITS 16 /* TILE-Gx uses 64KB page size */ +#define TARGET_PHYS_ADDR_SPACE_BITS 42 +#define TARGET_VIRT_ADDR_SPACE_BITS 64 +#define MMU_USER_IDX 0 /* Current memory operation is in user mode */ + +#include "exec/cpu-all.h" + +void tilegx_tcg_init(void); +int cpu_tilegx_exec(CPUState *s); +int cpu_tilegx_signal_handler(int host_signum, void *pinfo, void *puc); + +TileGXCPU *cpu_tilegx_init(const char *cpu_model); + +#define cpu_init(cpu_model) CPU(cpu_tilegx_init(cpu_model)) + +#define cpu_exec cpu_tilegx_exec +#define cpu_signal_handler cpu_tilegx_signal_handler + +static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + *pc = env->pc; + *cs_base = 0; + *flags = 0; +} + +#include "exec/exec-all.h" + +#endif diff --git a/qemu/target-tilegx/helper.c b/qemu/target-tilegx/helper.c new file mode 100644 index 000000000..616c5c7cf --- /dev/null +++ b/qemu/target-tilegx/helper.c @@ -0,0 +1,162 @@ +/* + * QEMU TILE-Gx helpers + * + * Copyright (c) 2015 Chen Gang + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "exec/helper-proto.h"
+#include <zlib.h> /* For crc32 */
+#include "syscall_defs.h"
+
+void helper_exception(CPUTLGState *env, uint32_t excp)
+{
+    CPUState *cs = CPU(tilegx_env_get_cpu(env));
+
+    cs->exception_index = excp;
+    cpu_loop_exit(cs);
+}
+
+void helper_ext01_ics(CPUTLGState *env)
+{
+    uint64_t val = env->spregs[TILEGX_SPR_EX_CONTEXT_0_1];
+
+    switch (val) {
+    case 0:
+    case 1:
+        env->spregs[TILEGX_SPR_CRITICAL_SEC] = val;
+        break;
+    default:
+#if defined(CONFIG_USER_ONLY)
+        env->signo = TARGET_SIGILL;
+        env->sigcode = TARGET_ILL_ILLOPC;
+        helper_exception(env, TILEGX_EXCP_SIGNAL);
+#else
+        helper_exception(env, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
+#endif
+        break;
+    }
+}
+
+uint64_t helper_cntlz(uint64_t arg)
+{
+    return clz64(arg);
+}
+
+uint64_t helper_cnttz(uint64_t arg)
+{
+    return ctz64(arg);
+}
+
+uint64_t helper_pcnt(uint64_t arg)
+{
+    return ctpop64(arg);
+}
+
+uint64_t helper_revbits(uint64_t arg)
+{
+    return revbit64(arg);
+}
+
+/*
+ * Functional Description
+ *     uint64_t a = rf[SrcA];
+ *     uint64_t b = rf[SrcB];
+ *     uint64_t d = rf[Dest];
+ *     uint64_t output = 0;
+ *     unsigned int counter;
+ *     for (counter = 0; counter < (WORD_SIZE / BYTE_SIZE); counter++)
+ *     {
+ *         int sel = getByte (b, counter) & 0xf;
+ *         uint8_t byte = (sel < 8) ? getByte (d, sel) : getByte (a, (sel - 8));
+ *         output = setByte (output, counter, byte);
+ *     }
+ *     rf[Dest] = output;
+ */
+uint64_t helper_shufflebytes(uint64_t dest, uint64_t srca, uint64_t srcb)
+{
+    uint64_t vdst = 0;
+    int count;
+
+    for (count = 0; count < 64; count += 8) {
+        uint64_t sel = srcb >> count;
+        uint64_t src = (sel & 8) ? srca : dest;
+        vdst |= extract64(src, (sel & 7) * 8, 8) << count;
+    }
+
+    return vdst;
+}
+
+uint64_t helper_crc32_8(uint64_t accum, uint64_t input)
+{
+    uint8_t buf = input;
+
+    /* zlib crc32 converts the accumulator and output to one's complement. */
+    return crc32(accum ^ 0xffffffff, &buf, 1) ^ 0xffffffff;
+}
+
+uint64_t helper_crc32_32(uint64_t accum, uint64_t input)
+{
+    uint8_t buf[4];
+
+    stl_le_p(buf, input);
+
+    /* zlib crc32 converts the accumulator and output to one's complement.
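+     * zlib's crc32() applies the standard pre/post inversion itself,
+     * so the accumulator is un-inverted on entry and re-inverted on
+     * return, keeping a raw (non-inverted) CRC in the guest register.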
*/ + return crc32(accum ^ 0xffffffff, buf, 4) ^ 0xffffffff; +} + +uint64_t helper_cmula(uint64_t srcd, uint64_t srca, uint64_t srcb) +{ + uint32_t reala = (int16_t)srca; + uint32_t imaga = (int16_t)(srca >> 16); + uint32_t realb = (int16_t)srcb; + uint32_t imagb = (int16_t)(srcb >> 16); + uint32_t reald = srcd; + uint32_t imagd = srcd >> 32; + uint32_t realr = reala * realb - imaga * imagb + reald; + uint32_t imagr = reala * imagb + imaga * realb + imagd; + + return deposit64(realr, 32, 32, imagr); +} + +uint64_t helper_cmulaf(uint64_t srcd, uint64_t srca, uint64_t srcb) +{ + uint32_t reala = (int16_t)srca; + uint32_t imaga = (int16_t)(srca >> 16); + uint32_t realb = (int16_t)srcb; + uint32_t imagb = (int16_t)(srcb >> 16); + uint32_t reald = (int16_t)srcd; + uint32_t imagd = (int16_t)(srcd >> 16); + int32_t realr = reala * realb - imaga * imagb; + int32_t imagr = reala * imagb + imaga * realb; + + return deposit32((realr >> 15) + reald, 16, 16, (imagr >> 15) + imagd); +} + +uint64_t helper_cmul2(uint64_t srca, uint64_t srcb, int shift, int round) +{ + uint32_t reala = (int16_t)srca; + uint32_t imaga = (int16_t)(srca >> 16); + uint32_t realb = (int16_t)srcb; + uint32_t imagb = (int16_t)(srcb >> 16); + int32_t realr = reala * realb - imaga * imagb + round; + int32_t imagr = reala * imagb + imaga * realb + round; + + return deposit32(realr >> shift, 16, 16, imagr >> shift); +} diff --git a/qemu/target-tilegx/helper.h b/qemu/target-tilegx/helper.h new file mode 100644 index 000000000..9281d0f42 --- /dev/null +++ b/qemu/target-tilegx/helper.h @@ -0,0 +1,26 @@ +DEF_HELPER_2(exception, noreturn, env, i32) +DEF_HELPER_1(ext01_ics, void, env) +DEF_HELPER_FLAGS_1(cntlz, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(cnttz, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(pcnt, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(revbits, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_3(shufflebytes, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_2(crc32_8, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(crc32_32, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_3(cmula, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_3(cmulaf, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_4(cmul2, TCG_CALL_NO_RWG_SE, i64, i64, i64, int, int) + +DEF_HELPER_FLAGS_2(v1int_h, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v1int_l, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v2int_h, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v2int_l, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(v1multu, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v2mults, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v1shl, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v1shru, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v1shrs, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v2shl, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v2shru, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(v2shrs, TCG_CALL_NO_RWG_SE, i64, i64, i64) diff --git a/qemu/target-tilegx/opcode_tilegx.h b/qemu/target-tilegx/opcode_tilegx.h new file mode 100644 index 000000000..989436d2f --- /dev/null +++ b/qemu/target-tilegx/opcode_tilegx.h @@ -0,0 +1,1406 @@ +/* TILE-Gx opcode information. + * + * Copyright 2011 Tilera Corporation. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + * + * + * + * + */ + +#ifndef __ARCH_OPCODE_H__ +#define __ARCH_OPCODE_H__ + +#ifndef __ASSEMBLER__ + +typedef uint64_t tilegx_bundle_bits; + +/* These are the bits that determine if a bundle is in the X encoding. */ +#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62) + +enum +{ + /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ + TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3, + + /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ + TILEGX_NUM_PIPELINE_ENCODINGS = 5, + + /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */ + TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3, + + /* Instructions take this many bytes. */ + TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES, + + /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */ + TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, + + /* Bundles should be aligned modulo this number of bytes. */ + TILEGX_BUNDLE_ALIGNMENT_IN_BYTES = + (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), + + /* Number of registers (some are magic, such as network I/O). */ + TILEGX_NUM_REGISTERS = 64, +}; + +/* Make a few "tile_" variables to simplify common code between + architectures. */ + +typedef tilegx_bundle_bits tile_bundle_bits; +#define TILE_BUNDLE_SIZE_IN_BYTES TILEGX_BUNDLE_SIZE_IN_BYTES +#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES +#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \ + TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES +#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE + +/* 64-bit pattern for a { bpt ; nop } bundle. 
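+   Debuggers plant a software breakpoint by overwriting the target
+   bundle with this constant, an X-mode encoding of { bpt ; nop }.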
*/ +#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL + +static inline unsigned int +get_BFEnd_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0x3f); +} + +static inline unsigned int +get_BFOpcodeExtension_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 24)) & 0xf); +} + +static inline unsigned int +get_BFStart_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 18)) & 0x3f); +} + +static inline unsigned int +get_BrOff_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 31)) & 0x0000003f) | + (((unsigned int)(n >> 37)) & 0x0001ffc0); +} + +static inline unsigned int +get_BrType_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 54)) & 0x1f); +} + +static inline unsigned int +get_Dest_Imm8_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 31)) & 0x0000003f) | + (((unsigned int)(n >> 43)) & 0x000000c0); +} + +static inline unsigned int +get_Dest_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 0)) & 0x3f); +} + +static inline unsigned int +get_Dest_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 31)) & 0x3f); +} + +static inline unsigned int +get_Dest_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 0)) & 0x3f); +} + +static inline unsigned int +get_Dest_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 31)) & 0x3f); +} + +static inline unsigned int +get_Imm16_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0xffff); +} + +static inline unsigned int +get_Imm16_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0xffff); +} + +static inline unsigned int +get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 20)) & 0xff); +} + +static inline unsigned int +get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 51)) & 0xff); +} + +static inline unsigned int +get_Imm8_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0xff); +} + +static inline unsigned int +get_Imm8_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0xff); +} + +static inline unsigned int +get_Imm8_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0xff); +} + +static inline unsigned int +get_Imm8_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0xff); +} + +static inline unsigned int +get_JumpOff_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 31)) & 0x7ffffff); +} + +static inline unsigned int +get_JumpOpcodeExtension_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 58)) & 0x1); +} + +static inline unsigned int +get_MF_Imm14_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 37)) & 0x3fff); +} + +static inline unsigned int +get_MT_Imm14_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 31)) & 0x0000003f) | + (((unsigned int)(n >> 37)) & 0x00003fc0); +} + +static inline unsigned int +get_Mode(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 62)) & 0x3); +} + +static inline unsigned int +get_Opcode_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 28)) & 0x7); +} + +static inline unsigned int +get_Opcode_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 59)) & 0x7); +} + +static inline unsigned int 
+get_Opcode_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 27)) & 0xf); +} + +static inline unsigned int +get_Opcode_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 58)) & 0xf); +} + +static inline unsigned int +get_Opcode_Y2(tilegx_bundle_bits n) +{ + return (((n >> 26)) & 0x00000001) | + (((unsigned int)(n >> 56)) & 0x00000002); +} + +static inline unsigned int +get_RRROpcodeExtension_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 18)) & 0x3ff); +} + +static inline unsigned int +get_RRROpcodeExtension_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 49)) & 0x3ff); +} + +static inline unsigned int +get_RRROpcodeExtension_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 18)) & 0x3); +} + +static inline unsigned int +get_RRROpcodeExtension_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 49)) & 0x3); +} + +static inline unsigned int +get_ShAmt_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0x3f); +} + +static inline unsigned int +get_ShAmt_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0x3f); +} + +static inline unsigned int +get_ShAmt_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0x3f); +} + +static inline unsigned int +get_ShAmt_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0x3f); +} + +static inline unsigned int +get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 18)) & 0x3ff); +} + +static inline unsigned int +get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 49)) & 0x3ff); +} + +static inline unsigned int +get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 18)) & 0x3); +} + +static inline unsigned int +get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 49)) & 0x3); +} + +static inline unsigned int +get_SrcA_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 6)) & 0x3f); +} + +static inline unsigned int +get_SrcA_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 37)) & 0x3f); +} + +static inline unsigned int +get_SrcA_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 6)) & 0x3f); +} + +static inline unsigned int +get_SrcA_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 37)) & 0x3f); +} + +static inline unsigned int +get_SrcA_Y2(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 20)) & 0x3f); +} + +static inline unsigned int +get_SrcBDest_Y2(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 51)) & 0x3f); +} + +static inline unsigned int +get_SrcB_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0x3f); +} + +static inline unsigned int +get_SrcB_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0x3f); +} + +static inline unsigned int +get_SrcB_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0x3f); +} + +static inline unsigned int +get_SrcB_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0x3f); +} + +static inline unsigned int +get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + 
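+  /* The unary extension is encoded in the same bits as the SrcB
+     operand field (12..17); unary instructions have no SrcB. */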
return (((n >> 12)) & 0x3f); +} + +static inline unsigned int +get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0x3f); +} + +static inline unsigned int +get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num) +{ + const unsigned int n = (unsigned int)num; + return (((n >> 12)) & 0x3f); +} + +static inline unsigned int +get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 43)) & 0x3f); +} + + +static inline int +sign_extend(int n, int num_bits) +{ + int shift = (int)(sizeof(int) * 8 - num_bits); + return (n << shift) >> shift; +} + + + +static inline tilegx_bundle_bits +create_BFEnd_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 12); +} + +static inline tilegx_bundle_bits +create_BFOpcodeExtension_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0xf) << 24); +} + +static inline tilegx_bundle_bits +create_BFStart_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 18); +} + +static inline tilegx_bundle_bits +create_BrOff_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) | + (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37); +} + +static inline tilegx_bundle_bits +create_BrType_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x1f)) << 54); +} + +static inline tilegx_bundle_bits +create_Dest_Imm8_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) | + (((tilegx_bundle_bits)(n & 0x000000c0)) << 43); +} + +static inline tilegx_bundle_bits +create_Dest_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 0); +} + +static inline tilegx_bundle_bits +create_Dest_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 31); +} + +static inline tilegx_bundle_bits +create_Dest_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 0); +} + +static inline tilegx_bundle_bits +create_Dest_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 31); +} + +static inline tilegx_bundle_bits +create_Imm16_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0xffff) << 12); +} + +static inline tilegx_bundle_bits +create_Imm16_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0xffff)) << 43); +} + +static inline tilegx_bundle_bits +create_Imm8OpcodeExtension_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0xff) << 20); +} + +static inline tilegx_bundle_bits +create_Imm8OpcodeExtension_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0xff)) << 51); +} + +static inline tilegx_bundle_bits +create_Imm8_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0xff) << 12); +} + +static inline tilegx_bundle_bits +create_Imm8_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0xff)) << 43); +} + +static inline tilegx_bundle_bits +create_Imm8_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0xff) << 12); +} + +static inline tilegx_bundle_bits +create_Imm8_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0xff)) << 43); +} + +static inline tilegx_bundle_bits 
+create_JumpOff_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31); +} + +static inline tilegx_bundle_bits +create_JumpOpcodeExtension_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x1)) << 58); +} + +static inline tilegx_bundle_bits +create_MF_Imm14_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3fff)) << 37); +} + +static inline tilegx_bundle_bits +create_MT_Imm14_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) | + (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37); +} + +static inline tilegx_bundle_bits +create_Mode(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3)) << 62); +} + +static inline tilegx_bundle_bits +create_Opcode_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x7) << 28); +} + +static inline tilegx_bundle_bits +create_Opcode_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x7)) << 59); +} + +static inline tilegx_bundle_bits +create_Opcode_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0xf) << 27); +} + +static inline tilegx_bundle_bits +create_Opcode_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0xf)) << 58); +} + +static inline tilegx_bundle_bits +create_Opcode_Y2(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x00000001) << 26) | + (((tilegx_bundle_bits)(n & 0x00000002)) << 56); +} + +static inline tilegx_bundle_bits +create_RRROpcodeExtension_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3ff) << 18); +} + +static inline tilegx_bundle_bits +create_RRROpcodeExtension_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3ff)) << 49); +} + +static inline tilegx_bundle_bits +create_RRROpcodeExtension_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3) << 18); +} + +static inline tilegx_bundle_bits +create_RRROpcodeExtension_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3)) << 49); +} + +static inline tilegx_bundle_bits +create_ShAmt_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 12); +} + +static inline tilegx_bundle_bits +create_ShAmt_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); +} + +static inline tilegx_bundle_bits +create_ShAmt_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 12); +} + +static inline tilegx_bundle_bits +create_ShAmt_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); +} + +static inline tilegx_bundle_bits +create_ShiftOpcodeExtension_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3ff) << 18); +} + +static inline tilegx_bundle_bits +create_ShiftOpcodeExtension_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3ff)) << 49); +} + +static inline tilegx_bundle_bits +create_ShiftOpcodeExtension_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3) << 18); +} + +static inline tilegx_bundle_bits +create_ShiftOpcodeExtension_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + 
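+  /* The Y-pipe shift extension is only 2 bits wide; its X-pipe
+     counterpart above carries a full 10-bit extension field. */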
return (((tilegx_bundle_bits)(n & 0x3)) << 49); +} + +static inline tilegx_bundle_bits +create_SrcA_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 6); +} + +static inline tilegx_bundle_bits +create_SrcA_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 37); +} + +static inline tilegx_bundle_bits +create_SrcA_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 6); +} + +static inline tilegx_bundle_bits +create_SrcA_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 37); +} + +static inline tilegx_bundle_bits +create_SrcA_Y2(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 20); +} + +static inline tilegx_bundle_bits +create_SrcBDest_Y2(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 51); +} + +static inline tilegx_bundle_bits +create_SrcB_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 12); +} + +static inline tilegx_bundle_bits +create_SrcB_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); +} + +static inline tilegx_bundle_bits +create_SrcB_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 12); +} + +static inline tilegx_bundle_bits +create_SrcB_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); +} + +static inline tilegx_bundle_bits +create_UnaryOpcodeExtension_X0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 12); +} + +static inline tilegx_bundle_bits +create_UnaryOpcodeExtension_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); +} + +static inline tilegx_bundle_bits +create_UnaryOpcodeExtension_Y0(int num) +{ + const unsigned int n = (unsigned int)num; + return ((n & 0x3f) << 12); +} + +static inline tilegx_bundle_bits +create_UnaryOpcodeExtension_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); +} + + +enum +{ + ADDI_IMM8_OPCODE_X0 = 1, + ADDI_IMM8_OPCODE_X1 = 1, + ADDI_OPCODE_Y0 = 0, + ADDI_OPCODE_Y1 = 1, + ADDLI_OPCODE_X0 = 1, + ADDLI_OPCODE_X1 = 0, + ADDXI_IMM8_OPCODE_X0 = 2, + ADDXI_IMM8_OPCODE_X1 = 2, + ADDXI_OPCODE_Y0 = 1, + ADDXI_OPCODE_Y1 = 2, + ADDXLI_OPCODE_X0 = 2, + ADDXLI_OPCODE_X1 = 1, + ADDXSC_RRR_0_OPCODE_X0 = 1, + ADDXSC_RRR_0_OPCODE_X1 = 1, + ADDX_RRR_0_OPCODE_X0 = 2, + ADDX_RRR_0_OPCODE_X1 = 2, + ADDX_RRR_0_OPCODE_Y0 = 0, + ADDX_RRR_0_OPCODE_Y1 = 0, + ADD_RRR_0_OPCODE_X0 = 3, + ADD_RRR_0_OPCODE_X1 = 3, + ADD_RRR_0_OPCODE_Y0 = 1, + ADD_RRR_0_OPCODE_Y1 = 1, + ANDI_IMM8_OPCODE_X0 = 3, + ANDI_IMM8_OPCODE_X1 = 3, + ANDI_OPCODE_Y0 = 2, + ANDI_OPCODE_Y1 = 3, + AND_RRR_0_OPCODE_X0 = 4, + AND_RRR_0_OPCODE_X1 = 4, + AND_RRR_5_OPCODE_Y0 = 0, + AND_RRR_5_OPCODE_Y1 = 0, + BEQZT_BRANCH_OPCODE_X1 = 16, + BEQZ_BRANCH_OPCODE_X1 = 17, + BFEXTS_BF_OPCODE_X0 = 4, + BFEXTU_BF_OPCODE_X0 = 5, + BFINS_BF_OPCODE_X0 = 6, + BF_OPCODE_X0 = 3, + BGEZT_BRANCH_OPCODE_X1 = 18, + BGEZ_BRANCH_OPCODE_X1 = 19, + BGTZT_BRANCH_OPCODE_X1 = 20, + BGTZ_BRANCH_OPCODE_X1 = 21, + BLBCT_BRANCH_OPCODE_X1 = 22, + BLBC_BRANCH_OPCODE_X1 = 23, + BLBST_BRANCH_OPCODE_X1 = 24, + BLBS_BRANCH_OPCODE_X1 = 25, + BLEZT_BRANCH_OPCODE_X1 = 26, + BLEZ_BRANCH_OPCODE_X1 = 27, + BLTZT_BRANCH_OPCODE_X1 = 28, + BLTZ_BRANCH_OPCODE_X1 = 29, + 
BNEZT_BRANCH_OPCODE_X1 = 30, + BNEZ_BRANCH_OPCODE_X1 = 31, + BRANCH_OPCODE_X1 = 2, + CMOVEQZ_RRR_0_OPCODE_X0 = 5, + CMOVEQZ_RRR_4_OPCODE_Y0 = 0, + CMOVNEZ_RRR_0_OPCODE_X0 = 6, + CMOVNEZ_RRR_4_OPCODE_Y0 = 1, + CMPEQI_IMM8_OPCODE_X0 = 4, + CMPEQI_IMM8_OPCODE_X1 = 4, + CMPEQI_OPCODE_Y0 = 3, + CMPEQI_OPCODE_Y1 = 4, + CMPEQ_RRR_0_OPCODE_X0 = 7, + CMPEQ_RRR_0_OPCODE_X1 = 5, + CMPEQ_RRR_3_OPCODE_Y0 = 0, + CMPEQ_RRR_3_OPCODE_Y1 = 2, + CMPEXCH4_RRR_0_OPCODE_X1 = 6, + CMPEXCH_RRR_0_OPCODE_X1 = 7, + CMPLES_RRR_0_OPCODE_X0 = 8, + CMPLES_RRR_0_OPCODE_X1 = 8, + CMPLES_RRR_2_OPCODE_Y0 = 0, + CMPLES_RRR_2_OPCODE_Y1 = 0, + CMPLEU_RRR_0_OPCODE_X0 = 9, + CMPLEU_RRR_0_OPCODE_X1 = 9, + CMPLEU_RRR_2_OPCODE_Y0 = 1, + CMPLEU_RRR_2_OPCODE_Y1 = 1, + CMPLTSI_IMM8_OPCODE_X0 = 5, + CMPLTSI_IMM8_OPCODE_X1 = 5, + CMPLTSI_OPCODE_Y0 = 4, + CMPLTSI_OPCODE_Y1 = 5, + CMPLTS_RRR_0_OPCODE_X0 = 10, + CMPLTS_RRR_0_OPCODE_X1 = 10, + CMPLTS_RRR_2_OPCODE_Y0 = 2, + CMPLTS_RRR_2_OPCODE_Y1 = 2, + CMPLTUI_IMM8_OPCODE_X0 = 6, + CMPLTUI_IMM8_OPCODE_X1 = 6, + CMPLTU_RRR_0_OPCODE_X0 = 11, + CMPLTU_RRR_0_OPCODE_X1 = 11, + CMPLTU_RRR_2_OPCODE_Y0 = 3, + CMPLTU_RRR_2_OPCODE_Y1 = 3, + CMPNE_RRR_0_OPCODE_X0 = 12, + CMPNE_RRR_0_OPCODE_X1 = 12, + CMPNE_RRR_3_OPCODE_Y0 = 1, + CMPNE_RRR_3_OPCODE_Y1 = 3, + CMULAF_RRR_0_OPCODE_X0 = 13, + CMULA_RRR_0_OPCODE_X0 = 14, + CMULFR_RRR_0_OPCODE_X0 = 15, + CMULF_RRR_0_OPCODE_X0 = 16, + CMULHR_RRR_0_OPCODE_X0 = 17, + CMULH_RRR_0_OPCODE_X0 = 18, + CMUL_RRR_0_OPCODE_X0 = 19, + CNTLZ_UNARY_OPCODE_X0 = 1, + CNTLZ_UNARY_OPCODE_Y0 = 1, + CNTTZ_UNARY_OPCODE_X0 = 2, + CNTTZ_UNARY_OPCODE_Y0 = 2, + CRC32_32_RRR_0_OPCODE_X0 = 20, + CRC32_8_RRR_0_OPCODE_X0 = 21, + DBLALIGN2_RRR_0_OPCODE_X0 = 22, + DBLALIGN2_RRR_0_OPCODE_X1 = 13, + DBLALIGN4_RRR_0_OPCODE_X0 = 23, + DBLALIGN4_RRR_0_OPCODE_X1 = 14, + DBLALIGN6_RRR_0_OPCODE_X0 = 24, + DBLALIGN6_RRR_0_OPCODE_X1 = 15, + DBLALIGN_RRR_0_OPCODE_X0 = 25, + DRAIN_UNARY_OPCODE_X1 = 1, + DTLBPR_UNARY_OPCODE_X1 = 2, + EXCH4_RRR_0_OPCODE_X1 = 16, + EXCH_RRR_0_OPCODE_X1 = 17, + FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26, + FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27, + FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28, + FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29, + FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30, + FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31, + FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32, + FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33, + FETCHADD4_RRR_0_OPCODE_X1 = 18, + FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19, + FETCHADDGEZ_RRR_0_OPCODE_X1 = 20, + FETCHADD_RRR_0_OPCODE_X1 = 21, + FETCHAND4_RRR_0_OPCODE_X1 = 22, + FETCHAND_RRR_0_OPCODE_X1 = 23, + FETCHOR4_RRR_0_OPCODE_X1 = 24, + FETCHOR_RRR_0_OPCODE_X1 = 25, + FINV_UNARY_OPCODE_X1 = 3, + FLUSHWB_UNARY_OPCODE_X1 = 4, + FLUSH_UNARY_OPCODE_X1 = 5, + FNOP_UNARY_OPCODE_X0 = 3, + FNOP_UNARY_OPCODE_X1 = 6, + FNOP_UNARY_OPCODE_Y0 = 3, + FNOP_UNARY_OPCODE_Y1 = 8, + FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34, + FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35, + FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36, + FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37, + FSINGLE_PACK1_UNARY_OPCODE_X0 = 4, + FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4, + FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38, + FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39, + ICOH_UNARY_OPCODE_X1 = 7, + ILL_UNARY_OPCODE_X1 = 8, + ILL_UNARY_OPCODE_Y1 = 9, + IMM8_OPCODE_X0 = 4, + IMM8_OPCODE_X1 = 3, + INV_UNARY_OPCODE_X1 = 9, + IRET_UNARY_OPCODE_X1 = 10, + JALRP_UNARY_OPCODE_X1 = 11, + JALRP_UNARY_OPCODE_Y1 = 10, + JALR_UNARY_OPCODE_X1 = 12, + JALR_UNARY_OPCODE_Y1 = 11, + JAL_JUMP_OPCODE_X1 = 0, + JRP_UNARY_OPCODE_X1 = 13, + JRP_UNARY_OPCODE_Y1 = 12, + JR_UNARY_OPCODE_X1 = 14, + JR_UNARY_OPCODE_Y1 = 13, + 
JUMP_OPCODE_X1 = 4, + J_JUMP_OPCODE_X1 = 1, + LD1S_ADD_IMM8_OPCODE_X1 = 7, + LD1S_OPCODE_Y2 = 0, + LD1S_UNARY_OPCODE_X1 = 15, + LD1U_ADD_IMM8_OPCODE_X1 = 8, + LD1U_OPCODE_Y2 = 1, + LD1U_UNARY_OPCODE_X1 = 16, + LD2S_ADD_IMM8_OPCODE_X1 = 9, + LD2S_OPCODE_Y2 = 2, + LD2S_UNARY_OPCODE_X1 = 17, + LD2U_ADD_IMM8_OPCODE_X1 = 10, + LD2U_OPCODE_Y2 = 3, + LD2U_UNARY_OPCODE_X1 = 18, + LD4S_ADD_IMM8_OPCODE_X1 = 11, + LD4S_OPCODE_Y2 = 1, + LD4S_UNARY_OPCODE_X1 = 19, + LD4U_ADD_IMM8_OPCODE_X1 = 12, + LD4U_OPCODE_Y2 = 2, + LD4U_UNARY_OPCODE_X1 = 20, + LDNA_UNARY_OPCODE_X1 = 21, + LDNT1S_ADD_IMM8_OPCODE_X1 = 13, + LDNT1S_UNARY_OPCODE_X1 = 22, + LDNT1U_ADD_IMM8_OPCODE_X1 = 14, + LDNT1U_UNARY_OPCODE_X1 = 23, + LDNT2S_ADD_IMM8_OPCODE_X1 = 15, + LDNT2S_UNARY_OPCODE_X1 = 24, + LDNT2U_ADD_IMM8_OPCODE_X1 = 16, + LDNT2U_UNARY_OPCODE_X1 = 25, + LDNT4S_ADD_IMM8_OPCODE_X1 = 17, + LDNT4S_UNARY_OPCODE_X1 = 26, + LDNT4U_ADD_IMM8_OPCODE_X1 = 18, + LDNT4U_UNARY_OPCODE_X1 = 27, + LDNT_ADD_IMM8_OPCODE_X1 = 19, + LDNT_UNARY_OPCODE_X1 = 28, + LD_ADD_IMM8_OPCODE_X1 = 20, + LD_OPCODE_Y2 = 3, + LD_UNARY_OPCODE_X1 = 29, + LNK_UNARY_OPCODE_X1 = 30, + LNK_UNARY_OPCODE_Y1 = 14, + LDNA_ADD_IMM8_OPCODE_X1 = 21, + MFSPR_IMM8_OPCODE_X1 = 22, + MF_UNARY_OPCODE_X1 = 31, + MM_BF_OPCODE_X0 = 7, + MNZ_RRR_0_OPCODE_X0 = 40, + MNZ_RRR_0_OPCODE_X1 = 26, + MNZ_RRR_4_OPCODE_Y0 = 2, + MNZ_RRR_4_OPCODE_Y1 = 2, + MODE_OPCODE_YA2 = 1, + MODE_OPCODE_YB2 = 2, + MODE_OPCODE_YC2 = 3, + MTSPR_IMM8_OPCODE_X1 = 23, + MULAX_RRR_0_OPCODE_X0 = 41, + MULAX_RRR_3_OPCODE_Y0 = 2, + MULA_HS_HS_RRR_0_OPCODE_X0 = 42, + MULA_HS_HS_RRR_9_OPCODE_Y0 = 0, + MULA_HS_HU_RRR_0_OPCODE_X0 = 43, + MULA_HS_LS_RRR_0_OPCODE_X0 = 44, + MULA_HS_LU_RRR_0_OPCODE_X0 = 45, + MULA_HU_HU_RRR_0_OPCODE_X0 = 46, + MULA_HU_HU_RRR_9_OPCODE_Y0 = 1, + MULA_HU_LS_RRR_0_OPCODE_X0 = 47, + MULA_HU_LU_RRR_0_OPCODE_X0 = 48, + MULA_LS_LS_RRR_0_OPCODE_X0 = 49, + MULA_LS_LS_RRR_9_OPCODE_Y0 = 2, + MULA_LS_LU_RRR_0_OPCODE_X0 = 50, + MULA_LU_LU_RRR_0_OPCODE_X0 = 51, + MULA_LU_LU_RRR_9_OPCODE_Y0 = 3, + MULX_RRR_0_OPCODE_X0 = 52, + MULX_RRR_3_OPCODE_Y0 = 3, + MUL_HS_HS_RRR_0_OPCODE_X0 = 53, + MUL_HS_HS_RRR_8_OPCODE_Y0 = 0, + MUL_HS_HU_RRR_0_OPCODE_X0 = 54, + MUL_HS_LS_RRR_0_OPCODE_X0 = 55, + MUL_HS_LU_RRR_0_OPCODE_X0 = 56, + MUL_HU_HU_RRR_0_OPCODE_X0 = 57, + MUL_HU_HU_RRR_8_OPCODE_Y0 = 1, + MUL_HU_LS_RRR_0_OPCODE_X0 = 58, + MUL_HU_LU_RRR_0_OPCODE_X0 = 59, + MUL_LS_LS_RRR_0_OPCODE_X0 = 60, + MUL_LS_LS_RRR_8_OPCODE_Y0 = 2, + MUL_LS_LU_RRR_0_OPCODE_X0 = 61, + MUL_LU_LU_RRR_0_OPCODE_X0 = 62, + MUL_LU_LU_RRR_8_OPCODE_Y0 = 3, + MZ_RRR_0_OPCODE_X0 = 63, + MZ_RRR_0_OPCODE_X1 = 27, + MZ_RRR_4_OPCODE_Y0 = 3, + MZ_RRR_4_OPCODE_Y1 = 3, + NAP_UNARY_OPCODE_X1 = 32, + NOP_UNARY_OPCODE_X0 = 5, + NOP_UNARY_OPCODE_X1 = 33, + NOP_UNARY_OPCODE_Y0 = 5, + NOP_UNARY_OPCODE_Y1 = 15, + NOR_RRR_0_OPCODE_X0 = 64, + NOR_RRR_0_OPCODE_X1 = 28, + NOR_RRR_5_OPCODE_Y0 = 1, + NOR_RRR_5_OPCODE_Y1 = 1, + ORI_IMM8_OPCODE_X0 = 7, + ORI_IMM8_OPCODE_X1 = 24, + OR_RRR_0_OPCODE_X0 = 65, + OR_RRR_0_OPCODE_X1 = 29, + OR_RRR_5_OPCODE_Y0 = 2, + OR_RRR_5_OPCODE_Y1 = 2, + PCNT_UNARY_OPCODE_X0 = 6, + PCNT_UNARY_OPCODE_Y0 = 6, + REVBITS_UNARY_OPCODE_X0 = 7, + REVBITS_UNARY_OPCODE_Y0 = 7, + REVBYTES_UNARY_OPCODE_X0 = 8, + REVBYTES_UNARY_OPCODE_Y0 = 8, + ROTLI_SHIFT_OPCODE_X0 = 1, + ROTLI_SHIFT_OPCODE_X1 = 1, + ROTLI_SHIFT_OPCODE_Y0 = 0, + ROTLI_SHIFT_OPCODE_Y1 = 0, + ROTL_RRR_0_OPCODE_X0 = 66, + ROTL_RRR_0_OPCODE_X1 = 30, + ROTL_RRR_6_OPCODE_Y0 = 0, + ROTL_RRR_6_OPCODE_Y1 = 0, + RRR_0_OPCODE_X0 = 5, + RRR_0_OPCODE_X1 = 5, + RRR_0_OPCODE_Y0 = 5, + 
RRR_0_OPCODE_Y1 = 6, + RRR_1_OPCODE_Y0 = 6, + RRR_1_OPCODE_Y1 = 7, + RRR_2_OPCODE_Y0 = 7, + RRR_2_OPCODE_Y1 = 8, + RRR_3_OPCODE_Y0 = 8, + RRR_3_OPCODE_Y1 = 9, + RRR_4_OPCODE_Y0 = 9, + RRR_4_OPCODE_Y1 = 10, + RRR_5_OPCODE_Y0 = 10, + RRR_5_OPCODE_Y1 = 11, + RRR_6_OPCODE_Y0 = 11, + RRR_6_OPCODE_Y1 = 12, + RRR_7_OPCODE_Y0 = 12, + RRR_7_OPCODE_Y1 = 13, + RRR_8_OPCODE_Y0 = 13, + RRR_9_OPCODE_Y0 = 14, + SHIFT_OPCODE_X0 = 6, + SHIFT_OPCODE_X1 = 6, + SHIFT_OPCODE_Y0 = 15, + SHIFT_OPCODE_Y1 = 14, + SHL16INSLI_OPCODE_X0 = 7, + SHL16INSLI_OPCODE_X1 = 7, + SHL1ADDX_RRR_0_OPCODE_X0 = 67, + SHL1ADDX_RRR_0_OPCODE_X1 = 31, + SHL1ADDX_RRR_7_OPCODE_Y0 = 1, + SHL1ADDX_RRR_7_OPCODE_Y1 = 1, + SHL1ADD_RRR_0_OPCODE_X0 = 68, + SHL1ADD_RRR_0_OPCODE_X1 = 32, + SHL1ADD_RRR_1_OPCODE_Y0 = 0, + SHL1ADD_RRR_1_OPCODE_Y1 = 0, + SHL2ADDX_RRR_0_OPCODE_X0 = 69, + SHL2ADDX_RRR_0_OPCODE_X1 = 33, + SHL2ADDX_RRR_7_OPCODE_Y0 = 2, + SHL2ADDX_RRR_7_OPCODE_Y1 = 2, + SHL2ADD_RRR_0_OPCODE_X0 = 70, + SHL2ADD_RRR_0_OPCODE_X1 = 34, + SHL2ADD_RRR_1_OPCODE_Y0 = 1, + SHL2ADD_RRR_1_OPCODE_Y1 = 1, + SHL3ADDX_RRR_0_OPCODE_X0 = 71, + SHL3ADDX_RRR_0_OPCODE_X1 = 35, + SHL3ADDX_RRR_7_OPCODE_Y0 = 3, + SHL3ADDX_RRR_7_OPCODE_Y1 = 3, + SHL3ADD_RRR_0_OPCODE_X0 = 72, + SHL3ADD_RRR_0_OPCODE_X1 = 36, + SHL3ADD_RRR_1_OPCODE_Y0 = 2, + SHL3ADD_RRR_1_OPCODE_Y1 = 2, + SHLI_SHIFT_OPCODE_X0 = 2, + SHLI_SHIFT_OPCODE_X1 = 2, + SHLI_SHIFT_OPCODE_Y0 = 1, + SHLI_SHIFT_OPCODE_Y1 = 1, + SHLXI_SHIFT_OPCODE_X0 = 3, + SHLXI_SHIFT_OPCODE_X1 = 3, + SHLX_RRR_0_OPCODE_X0 = 73, + SHLX_RRR_0_OPCODE_X1 = 37, + SHL_RRR_0_OPCODE_X0 = 74, + SHL_RRR_0_OPCODE_X1 = 38, + SHL_RRR_6_OPCODE_Y0 = 1, + SHL_RRR_6_OPCODE_Y1 = 1, + SHRSI_SHIFT_OPCODE_X0 = 4, + SHRSI_SHIFT_OPCODE_X1 = 4, + SHRSI_SHIFT_OPCODE_Y0 = 2, + SHRSI_SHIFT_OPCODE_Y1 = 2, + SHRS_RRR_0_OPCODE_X0 = 75, + SHRS_RRR_0_OPCODE_X1 = 39, + SHRS_RRR_6_OPCODE_Y0 = 2, + SHRS_RRR_6_OPCODE_Y1 = 2, + SHRUI_SHIFT_OPCODE_X0 = 5, + SHRUI_SHIFT_OPCODE_X1 = 5, + SHRUI_SHIFT_OPCODE_Y0 = 3, + SHRUI_SHIFT_OPCODE_Y1 = 3, + SHRUXI_SHIFT_OPCODE_X0 = 6, + SHRUXI_SHIFT_OPCODE_X1 = 6, + SHRUX_RRR_0_OPCODE_X0 = 76, + SHRUX_RRR_0_OPCODE_X1 = 40, + SHRU_RRR_0_OPCODE_X0 = 77, + SHRU_RRR_0_OPCODE_X1 = 41, + SHRU_RRR_6_OPCODE_Y0 = 3, + SHRU_RRR_6_OPCODE_Y1 = 3, + SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78, + ST1_ADD_IMM8_OPCODE_X1 = 25, + ST1_OPCODE_Y2 = 0, + ST1_RRR_0_OPCODE_X1 = 42, + ST2_ADD_IMM8_OPCODE_X1 = 26, + ST2_OPCODE_Y2 = 1, + ST2_RRR_0_OPCODE_X1 = 43, + ST4_ADD_IMM8_OPCODE_X1 = 27, + ST4_OPCODE_Y2 = 2, + ST4_RRR_0_OPCODE_X1 = 44, + STNT1_ADD_IMM8_OPCODE_X1 = 28, + STNT1_RRR_0_OPCODE_X1 = 45, + STNT2_ADD_IMM8_OPCODE_X1 = 29, + STNT2_RRR_0_OPCODE_X1 = 46, + STNT4_ADD_IMM8_OPCODE_X1 = 30, + STNT4_RRR_0_OPCODE_X1 = 47, + STNT_ADD_IMM8_OPCODE_X1 = 31, + STNT_RRR_0_OPCODE_X1 = 48, + ST_ADD_IMM8_OPCODE_X1 = 32, + ST_OPCODE_Y2 = 3, + ST_RRR_0_OPCODE_X1 = 49, + SUBXSC_RRR_0_OPCODE_X0 = 79, + SUBXSC_RRR_0_OPCODE_X1 = 50, + SUBX_RRR_0_OPCODE_X0 = 80, + SUBX_RRR_0_OPCODE_X1 = 51, + SUBX_RRR_0_OPCODE_Y0 = 2, + SUBX_RRR_0_OPCODE_Y1 = 2, + SUB_RRR_0_OPCODE_X0 = 81, + SUB_RRR_0_OPCODE_X1 = 52, + SUB_RRR_0_OPCODE_Y0 = 3, + SUB_RRR_0_OPCODE_Y1 = 3, + SWINT0_UNARY_OPCODE_X1 = 34, + SWINT1_UNARY_OPCODE_X1 = 35, + SWINT2_UNARY_OPCODE_X1 = 36, + SWINT3_UNARY_OPCODE_X1 = 37, + TBLIDXB0_UNARY_OPCODE_X0 = 9, + TBLIDXB0_UNARY_OPCODE_Y0 = 9, + TBLIDXB1_UNARY_OPCODE_X0 = 10, + TBLIDXB1_UNARY_OPCODE_Y0 = 10, + TBLIDXB2_UNARY_OPCODE_X0 = 11, + TBLIDXB2_UNARY_OPCODE_Y0 = 11, + TBLIDXB3_UNARY_OPCODE_X0 = 12, + TBLIDXB3_UNARY_OPCODE_Y0 = 12, + UNARY_RRR_0_OPCODE_X0 = 82, + 
UNARY_RRR_0_OPCODE_X1 = 53, + UNARY_RRR_1_OPCODE_Y0 = 3, + UNARY_RRR_1_OPCODE_Y1 = 3, + V1ADDI_IMM8_OPCODE_X0 = 8, + V1ADDI_IMM8_OPCODE_X1 = 33, + V1ADDUC_RRR_0_OPCODE_X0 = 83, + V1ADDUC_RRR_0_OPCODE_X1 = 54, + V1ADD_RRR_0_OPCODE_X0 = 84, + V1ADD_RRR_0_OPCODE_X1 = 55, + V1ADIFFU_RRR_0_OPCODE_X0 = 85, + V1AVGU_RRR_0_OPCODE_X0 = 86, + V1CMPEQI_IMM8_OPCODE_X0 = 9, + V1CMPEQI_IMM8_OPCODE_X1 = 34, + V1CMPEQ_RRR_0_OPCODE_X0 = 87, + V1CMPEQ_RRR_0_OPCODE_X1 = 56, + V1CMPLES_RRR_0_OPCODE_X0 = 88, + V1CMPLES_RRR_0_OPCODE_X1 = 57, + V1CMPLEU_RRR_0_OPCODE_X0 = 89, + V1CMPLEU_RRR_0_OPCODE_X1 = 58, + V1CMPLTSI_IMM8_OPCODE_X0 = 10, + V1CMPLTSI_IMM8_OPCODE_X1 = 35, + V1CMPLTS_RRR_0_OPCODE_X0 = 90, + V1CMPLTS_RRR_0_OPCODE_X1 = 59, + V1CMPLTUI_IMM8_OPCODE_X0 = 11, + V1CMPLTUI_IMM8_OPCODE_X1 = 36, + V1CMPLTU_RRR_0_OPCODE_X0 = 91, + V1CMPLTU_RRR_0_OPCODE_X1 = 60, + V1CMPNE_RRR_0_OPCODE_X0 = 92, + V1CMPNE_RRR_0_OPCODE_X1 = 61, + V1DDOTPUA_RRR_0_OPCODE_X0 = 161, + V1DDOTPUSA_RRR_0_OPCODE_X0 = 93, + V1DDOTPUS_RRR_0_OPCODE_X0 = 94, + V1DDOTPU_RRR_0_OPCODE_X0 = 162, + V1DOTPA_RRR_0_OPCODE_X0 = 95, + V1DOTPUA_RRR_0_OPCODE_X0 = 163, + V1DOTPUSA_RRR_0_OPCODE_X0 = 96, + V1DOTPUS_RRR_0_OPCODE_X0 = 97, + V1DOTPU_RRR_0_OPCODE_X0 = 164, + V1DOTP_RRR_0_OPCODE_X0 = 98, + V1INT_H_RRR_0_OPCODE_X0 = 99, + V1INT_H_RRR_0_OPCODE_X1 = 62, + V1INT_L_RRR_0_OPCODE_X0 = 100, + V1INT_L_RRR_0_OPCODE_X1 = 63, + V1MAXUI_IMM8_OPCODE_X0 = 12, + V1MAXUI_IMM8_OPCODE_X1 = 37, + V1MAXU_RRR_0_OPCODE_X0 = 101, + V1MAXU_RRR_0_OPCODE_X1 = 64, + V1MINUI_IMM8_OPCODE_X0 = 13, + V1MINUI_IMM8_OPCODE_X1 = 38, + V1MINU_RRR_0_OPCODE_X0 = 102, + V1MINU_RRR_0_OPCODE_X1 = 65, + V1MNZ_RRR_0_OPCODE_X0 = 103, + V1MNZ_RRR_0_OPCODE_X1 = 66, + V1MULTU_RRR_0_OPCODE_X0 = 104, + V1MULUS_RRR_0_OPCODE_X0 = 105, + V1MULU_RRR_0_OPCODE_X0 = 106, + V1MZ_RRR_0_OPCODE_X0 = 107, + V1MZ_RRR_0_OPCODE_X1 = 67, + V1SADAU_RRR_0_OPCODE_X0 = 108, + V1SADU_RRR_0_OPCODE_X0 = 109, + V1SHLI_SHIFT_OPCODE_X0 = 7, + V1SHLI_SHIFT_OPCODE_X1 = 7, + V1SHL_RRR_0_OPCODE_X0 = 110, + V1SHL_RRR_0_OPCODE_X1 = 68, + V1SHRSI_SHIFT_OPCODE_X0 = 8, + V1SHRSI_SHIFT_OPCODE_X1 = 8, + V1SHRS_RRR_0_OPCODE_X0 = 111, + V1SHRS_RRR_0_OPCODE_X1 = 69, + V1SHRUI_SHIFT_OPCODE_X0 = 9, + V1SHRUI_SHIFT_OPCODE_X1 = 9, + V1SHRU_RRR_0_OPCODE_X0 = 112, + V1SHRU_RRR_0_OPCODE_X1 = 70, + V1SUBUC_RRR_0_OPCODE_X0 = 113, + V1SUBUC_RRR_0_OPCODE_X1 = 71, + V1SUB_RRR_0_OPCODE_X0 = 114, + V1SUB_RRR_0_OPCODE_X1 = 72, + V2ADDI_IMM8_OPCODE_X0 = 14, + V2ADDI_IMM8_OPCODE_X1 = 39, + V2ADDSC_RRR_0_OPCODE_X0 = 115, + V2ADDSC_RRR_0_OPCODE_X1 = 73, + V2ADD_RRR_0_OPCODE_X0 = 116, + V2ADD_RRR_0_OPCODE_X1 = 74, + V2ADIFFS_RRR_0_OPCODE_X0 = 117, + V2AVGS_RRR_0_OPCODE_X0 = 118, + V2CMPEQI_IMM8_OPCODE_X0 = 15, + V2CMPEQI_IMM8_OPCODE_X1 = 40, + V2CMPEQ_RRR_0_OPCODE_X0 = 119, + V2CMPEQ_RRR_0_OPCODE_X1 = 75, + V2CMPLES_RRR_0_OPCODE_X0 = 120, + V2CMPLES_RRR_0_OPCODE_X1 = 76, + V2CMPLEU_RRR_0_OPCODE_X0 = 121, + V2CMPLEU_RRR_0_OPCODE_X1 = 77, + V2CMPLTSI_IMM8_OPCODE_X0 = 16, + V2CMPLTSI_IMM8_OPCODE_X1 = 41, + V2CMPLTS_RRR_0_OPCODE_X0 = 122, + V2CMPLTS_RRR_0_OPCODE_X1 = 78, + V2CMPLTUI_IMM8_OPCODE_X0 = 17, + V2CMPLTUI_IMM8_OPCODE_X1 = 42, + V2CMPLTU_RRR_0_OPCODE_X0 = 123, + V2CMPLTU_RRR_0_OPCODE_X1 = 79, + V2CMPNE_RRR_0_OPCODE_X0 = 124, + V2CMPNE_RRR_0_OPCODE_X1 = 80, + V2DOTPA_RRR_0_OPCODE_X0 = 125, + V2DOTP_RRR_0_OPCODE_X0 = 126, + V2INT_H_RRR_0_OPCODE_X0 = 127, + V2INT_H_RRR_0_OPCODE_X1 = 81, + V2INT_L_RRR_0_OPCODE_X0 = 128, + V2INT_L_RRR_0_OPCODE_X1 = 82, + V2MAXSI_IMM8_OPCODE_X0 = 18, + V2MAXSI_IMM8_OPCODE_X1 = 43, + V2MAXS_RRR_0_OPCODE_X0 = 129, + 
V2MAXS_RRR_0_OPCODE_X1 = 83, + V2MINSI_IMM8_OPCODE_X0 = 19, + V2MINSI_IMM8_OPCODE_X1 = 44, + V2MINS_RRR_0_OPCODE_X0 = 130, + V2MINS_RRR_0_OPCODE_X1 = 84, + V2MNZ_RRR_0_OPCODE_X0 = 131, + V2MNZ_RRR_0_OPCODE_X1 = 85, + V2MULFSC_RRR_0_OPCODE_X0 = 132, + V2MULS_RRR_0_OPCODE_X0 = 133, + V2MULTS_RRR_0_OPCODE_X0 = 134, + V2MZ_RRR_0_OPCODE_X0 = 135, + V2MZ_RRR_0_OPCODE_X1 = 86, + V2PACKH_RRR_0_OPCODE_X0 = 136, + V2PACKH_RRR_0_OPCODE_X1 = 87, + V2PACKL_RRR_0_OPCODE_X0 = 137, + V2PACKL_RRR_0_OPCODE_X1 = 88, + V2PACKUC_RRR_0_OPCODE_X0 = 138, + V2PACKUC_RRR_0_OPCODE_X1 = 89, + V2SADAS_RRR_0_OPCODE_X0 = 139, + V2SADAU_RRR_0_OPCODE_X0 = 140, + V2SADS_RRR_0_OPCODE_X0 = 141, + V2SADU_RRR_0_OPCODE_X0 = 142, + V2SHLI_SHIFT_OPCODE_X0 = 10, + V2SHLI_SHIFT_OPCODE_X1 = 10, + V2SHLSC_RRR_0_OPCODE_X0 = 143, + V2SHLSC_RRR_0_OPCODE_X1 = 90, + V2SHL_RRR_0_OPCODE_X0 = 144, + V2SHL_RRR_0_OPCODE_X1 = 91, + V2SHRSI_SHIFT_OPCODE_X0 = 11, + V2SHRSI_SHIFT_OPCODE_X1 = 11, + V2SHRS_RRR_0_OPCODE_X0 = 145, + V2SHRS_RRR_0_OPCODE_X1 = 92, + V2SHRUI_SHIFT_OPCODE_X0 = 12, + V2SHRUI_SHIFT_OPCODE_X1 = 12, + V2SHRU_RRR_0_OPCODE_X0 = 146, + V2SHRU_RRR_0_OPCODE_X1 = 93, + V2SUBSC_RRR_0_OPCODE_X0 = 147, + V2SUBSC_RRR_0_OPCODE_X1 = 94, + V2SUB_RRR_0_OPCODE_X0 = 148, + V2SUB_RRR_0_OPCODE_X1 = 95, + V4ADDSC_RRR_0_OPCODE_X0 = 149, + V4ADDSC_RRR_0_OPCODE_X1 = 96, + V4ADD_RRR_0_OPCODE_X0 = 150, + V4ADD_RRR_0_OPCODE_X1 = 97, + V4INT_H_RRR_0_OPCODE_X0 = 151, + V4INT_H_RRR_0_OPCODE_X1 = 98, + V4INT_L_RRR_0_OPCODE_X0 = 152, + V4INT_L_RRR_0_OPCODE_X1 = 99, + V4PACKSC_RRR_0_OPCODE_X0 = 153, + V4PACKSC_RRR_0_OPCODE_X1 = 100, + V4SHLSC_RRR_0_OPCODE_X0 = 154, + V4SHLSC_RRR_0_OPCODE_X1 = 101, + V4SHL_RRR_0_OPCODE_X0 = 155, + V4SHL_RRR_0_OPCODE_X1 = 102, + V4SHRS_RRR_0_OPCODE_X0 = 156, + V4SHRS_RRR_0_OPCODE_X1 = 103, + V4SHRU_RRR_0_OPCODE_X0 = 157, + V4SHRU_RRR_0_OPCODE_X1 = 104, + V4SUBSC_RRR_0_OPCODE_X0 = 158, + V4SUBSC_RRR_0_OPCODE_X1 = 105, + V4SUB_RRR_0_OPCODE_X0 = 159, + V4SUB_RRR_0_OPCODE_X1 = 106, + WH64_UNARY_OPCODE_X1 = 38, + XORI_IMM8_OPCODE_X0 = 20, + XORI_IMM8_OPCODE_X1 = 45, + XOR_RRR_0_OPCODE_X0 = 160, + XOR_RRR_0_OPCODE_X1 = 107, + XOR_RRR_5_OPCODE_Y0 = 3, + XOR_RRR_5_OPCODE_Y1 = 3 +}; + + +#endif /* __ASSEMBLER__ */ + +#endif /* __ARCH_OPCODE_H__ */ diff --git a/qemu/target-tilegx/simd_helper.c b/qemu/target-tilegx/simd_helper.c new file mode 100644 index 000000000..2d40ddb63 --- /dev/null +++ b/qemu/target-tilegx/simd_helper.c @@ -0,0 +1,166 @@ +/* + * QEMU TILE-Gx helpers + * + * Copyright (c) 2015 Chen Gang + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu-common.h" +#include "exec/helper-proto.h" + + +/* Broadcast a value to all elements of a vector. 
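+ * Multiplying a zero-extended element by a constant with a 1 in the
+ * low bit of every lane (0x0101...01 for bytes, 0x0001...0001 for
+ * halfwords) replicates that element into every lane; e.g. the shift
+ * helpers below use V1(0xff >> b) to build a per-lane mask.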
*/ +#define V1(X) (((X) & 0xff) * 0x0101010101010101ull) +#define V2(X) (((X) & 0xffff) * 0x0001000100010001ull) + + +uint64_t helper_v1multu(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + for (i = 0; i < 64; i += 8) { + unsigned ae = extract64(a, i, 8); + unsigned be = extract64(b, i, 8); + r = deposit64(r, i, 8, ae * be); + } + return r; +} + +uint64_t helper_v2mults(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + /* While the instruction talks about signed inputs, with a + truncated result the sign of the inputs doesn't matter. */ + for (i = 0; i < 64; i += 16) { + unsigned ae = extract64(a, i, 16); + unsigned be = extract64(b, i, 16); + r = deposit64(r, i, 16, ae * be); + } + return r; +} + +uint64_t helper_v1shl(uint64_t a, uint64_t b) +{ + uint64_t m; + + b &= 7; + m = V1(0xff >> b); + return (a & m) << b; +} + +uint64_t helper_v2shl(uint64_t a, uint64_t b) +{ + uint64_t m; + + b &= 15; + m = V2(0xffff >> b); + return (a & m) << b; +} + +uint64_t helper_v1shru(uint64_t a, uint64_t b) +{ + uint64_t m; + + b &= 7; + m = V1(0xff << b); + return (a & m) >> b; +} + +uint64_t helper_v2shru(uint64_t a, uint64_t b) +{ + uint64_t m; + + b &= 15; + m = V2(0xffff << b); + return (a & m) >> b; +} + +uint64_t helper_v1shrs(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + b &= 7; + for (i = 0; i < 64; i += 8) { + r = deposit64(r, i, 8, sextract64(a, i + b, 8 - b)); + } + return r; +} + +uint64_t helper_v2shrs(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + b &= 15; + for (i = 0; i < 64; i += 16) { + r = deposit64(r, i, 16, sextract64(a, i + b, 16 - b)); + } + return r; +} + +uint64_t helper_v1int_h(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + for (i = 0; i < 32; i += 8) { + r = deposit64(r, 2 * i + 8, 8, extract64(a, i + 32, 8)); + r = deposit64(r, 2 * i, 8, extract64(b, i + 32, 8)); + } + return r; +} + +uint64_t helper_v1int_l(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + for (i = 0; i < 32; i += 8) { + r = deposit64(r, 2 * i + 8, 8, extract64(a, i, 8)); + r = deposit64(r, 2 * i, 8, extract64(b, i, 8)); + } + return r; +} + +uint64_t helper_v2int_h(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + for (i = 0; i < 32; i += 16) { + r = deposit64(r, 2 * i + 16, 16, extract64(a, i + 32, 16)); + r = deposit64(r, 2 * i, 16, extract64(b, i + 32, 16)); + } + return r; +} + +uint64_t helper_v2int_l(uint64_t a, uint64_t b) +{ + uint64_t r = 0; + int i; + + for (i = 0; i < 32; i += 16) { + r = deposit64(r, 2 * i + 16, 16, extract64(a, i, 16)); + r = deposit64(r, 2 * i, 16, extract64(b, i, 16)); + } + return r; +} diff --git a/qemu/target-tilegx/spr_def_64.h b/qemu/target-tilegx/spr_def_64.h new file mode 100644 index 000000000..67a6c1751 --- /dev/null +++ b/qemu/target-tilegx/spr_def_64.h @@ -0,0 +1,216 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __DOXYGEN__ + +#ifndef __ARCH_SPR_DEF_64_H__ +#define __ARCH_SPR_DEF_64_H__ + +#define SPR_AUX_PERF_COUNT_0 0x2105 +#define SPR_AUX_PERF_COUNT_1 0x2106 +#define SPR_AUX_PERF_COUNT_CTL 0x2107 +#define SPR_AUX_PERF_COUNT_STS 0x2108 +#define SPR_CMPEXCH_VALUE 0x2780 +#define SPR_CYCLE 0x2781 +#define SPR_DONE 0x2705 +#define SPR_DSTREAM_PF 0x2706 +#define SPR_EVENT_BEGIN 0x2782 +#define SPR_EVENT_END 0x2783 +#define SPR_EX_CONTEXT_0_0 0x2580 +#define SPR_EX_CONTEXT_0_1 0x2581 +#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0 +#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3 +#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3 +#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2 +#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1 +#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4 +#define SPR_EX_CONTEXT_1_0 0x2480 +#define SPR_EX_CONTEXT_1_1 0x2481 +#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0 +#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3 +#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3 +#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2 +#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1 +#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4 +#define SPR_EX_CONTEXT_2_0 0x2380 +#define SPR_EX_CONTEXT_2_1 0x2381 +#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0 +#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3 +#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3 +#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2 +#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1 +#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4 +#define SPR_FAIL 0x2707 +#define SPR_IDN_AVAIL_EN 0x1a05 +#define SPR_IDN_DATA_AVAIL 0x0a80 +#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806 +#define SPR_IDN_DEMUX_COUNT_0 0x0a05 +#define SPR_IDN_DEMUX_COUNT_1 0x0a06 +#define SPR_IDN_DIRECTION_PROTECT 0x1405 +#define SPR_IDN_PENDING 0x0a08 +#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1 +#define SPR_INTCTRL_0_STATUS 0x2505 +#define SPR_INTCTRL_1_STATUS 0x2405 +#define SPR_INTCTRL_2_STATUS 0x2305 +#define SPR_INTERRUPT_CRITICAL_SECTION 0x2708 +#define SPR_INTERRUPT_MASK_0 0x2506 +#define SPR_INTERRUPT_MASK_1 0x2406 +#define SPR_INTERRUPT_MASK_2 0x2306 +#define SPR_INTERRUPT_MASK_RESET_0 0x2507 +#define SPR_INTERRUPT_MASK_RESET_1 0x2407 +#define SPR_INTERRUPT_MASK_RESET_2 0x2307 +#define SPR_INTERRUPT_MASK_SET_0 0x2508 +#define SPR_INTERRUPT_MASK_SET_1 0x2408 +#define SPR_INTERRUPT_MASK_SET_2 0x2308 +#define SPR_INTERRUPT_VECTOR_BASE_0 0x2509 +#define SPR_INTERRUPT_VECTOR_BASE_1 0x2409 +#define SPR_INTERRUPT_VECTOR_BASE_2 0x2309 +#define SPR_INTERRUPT_VECTOR_BASE_3 0x2209 +#define SPR_IPI_EVENT_0 0x1f05 +#define SPR_IPI_EVENT_1 0x1e05 +#define SPR_IPI_EVENT_2 0x1d05 +#define SPR_IPI_EVENT_RESET_0 0x1f06 +#define SPR_IPI_EVENT_RESET_1 0x1e06 +#define SPR_IPI_EVENT_RESET_2 0x1d06 +#define SPR_IPI_EVENT_SET_0 0x1f07 +#define SPR_IPI_EVENT_SET_1 0x1e07 +#define SPR_IPI_EVENT_SET_2 0x1d07 +#define SPR_IPI_MASK_0 0x1f08 +#define SPR_IPI_MASK_1 0x1e08 +#define SPR_IPI_MASK_2 0x1d08 +#define SPR_IPI_MASK_RESET_0 0x1f09 +#define SPR_IPI_MASK_RESET_1 0x1e09 +#define SPR_IPI_MASK_RESET_2 0x1d09 +#define SPR_IPI_MASK_SET_0 0x1f0a +#define SPR_IPI_MASK_SET_1 0x1e0a +#define SPR_IPI_MASK_SET_2 0x1d0a +#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100 +#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101 +#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102 +#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700 +#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701 +#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702 +#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00 +#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01 +#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02 +#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00 +#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01 +#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02 
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500 +#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501 +#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502 +#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400 +#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401 +#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402 +#define SPR_MPL_IDN_TIMER_SET_0 0x1800 +#define SPR_MPL_IDN_TIMER_SET_1 0x1801 +#define SPR_MPL_IDN_TIMER_SET_2 0x1802 +#define SPR_MPL_INTCTRL_0_SET_0 0x2500 +#define SPR_MPL_INTCTRL_0_SET_1 0x2501 +#define SPR_MPL_INTCTRL_0_SET_2 0x2502 +#define SPR_MPL_INTCTRL_1_SET_0 0x2400 +#define SPR_MPL_INTCTRL_1_SET_1 0x2401 +#define SPR_MPL_INTCTRL_1_SET_2 0x2402 +#define SPR_MPL_INTCTRL_2_SET_0 0x2300 +#define SPR_MPL_INTCTRL_2_SET_1 0x2301 +#define SPR_MPL_INTCTRL_2_SET_2 0x2302 +#define SPR_MPL_IPI_0 0x1f04 +#define SPR_MPL_IPI_0_SET_0 0x1f00 +#define SPR_MPL_IPI_0_SET_1 0x1f01 +#define SPR_MPL_IPI_0_SET_2 0x1f02 +#define SPR_MPL_IPI_1 0x1e04 +#define SPR_MPL_IPI_1_SET_0 0x1e00 +#define SPR_MPL_IPI_1_SET_1 0x1e01 +#define SPR_MPL_IPI_1_SET_2 0x1e02 +#define SPR_MPL_IPI_2 0x1d04 +#define SPR_MPL_IPI_2_SET_0 0x1d00 +#define SPR_MPL_IPI_2_SET_1 0x1d01 +#define SPR_MPL_IPI_2_SET_2 0x1d02 +#define SPR_MPL_PERF_COUNT_SET_0 0x2000 +#define SPR_MPL_PERF_COUNT_SET_1 0x2001 +#define SPR_MPL_PERF_COUNT_SET_2 0x2002 +#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00 +#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01 +#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02 +#define SPR_MPL_UDN_AVAIL_SET_0 0x1b00 +#define SPR_MPL_UDN_AVAIL_SET_1 0x1b01 +#define SPR_MPL_UDN_AVAIL_SET_2 0x1b02 +#define SPR_MPL_UDN_COMPLETE_SET_0 0x0600 +#define SPR_MPL_UDN_COMPLETE_SET_1 0x0601 +#define SPR_MPL_UDN_COMPLETE_SET_2 0x0602 +#define SPR_MPL_UDN_FIREWALL_SET_0 0x1500 +#define SPR_MPL_UDN_FIREWALL_SET_1 0x1501 +#define SPR_MPL_UDN_FIREWALL_SET_2 0x1502 +#define SPR_MPL_UDN_TIMER_SET_0 0x1900 +#define SPR_MPL_UDN_TIMER_SET_1 0x1901 +#define SPR_MPL_UDN_TIMER_SET_2 0x1902 +#define SPR_MPL_WORLD_ACCESS_SET_0 0x2700 +#define SPR_MPL_WORLD_ACCESS_SET_1 0x2701 +#define SPR_MPL_WORLD_ACCESS_SET_2 0x2702 +#define SPR_PASS 0x2709 +#define SPR_PERF_COUNT_0 0x2005 +#define SPR_PERF_COUNT_1 0x2006 +#define SPR_PERF_COUNT_CTL 0x2007 +#define SPR_PERF_COUNT_DN_CTL 0x2008 +#define SPR_PERF_COUNT_STS 0x2009 +#define SPR_PROC_STATUS 0x2784 +#define SPR_SIM_CONTROL 0x2785 +#define SPR_SINGLE_STEP_CONTROL_0 0x0405 +#define SPR_SINGLE_STEP_CONTROL_0__CANCELED_MASK 0x1 +#define SPR_SINGLE_STEP_CONTROL_0__INHIBIT_MASK 0x2 +#define SPR_SINGLE_STEP_CONTROL_1 0x0305 +#define SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK 0x1 +#define SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK 0x2 +#define SPR_SINGLE_STEP_CONTROL_2 0x0205 +#define SPR_SINGLE_STEP_CONTROL_2__CANCELED_MASK 0x1 +#define SPR_SINGLE_STEP_CONTROL_2__INHIBIT_MASK 0x2 +#define SPR_SINGLE_STEP_EN_0_0 0x250a +#define SPR_SINGLE_STEP_EN_0_1 0x240a +#define SPR_SINGLE_STEP_EN_0_2 0x230a +#define SPR_SINGLE_STEP_EN_1_0 0x250b +#define SPR_SINGLE_STEP_EN_1_1 0x240b +#define SPR_SINGLE_STEP_EN_1_2 0x230b +#define SPR_SINGLE_STEP_EN_2_0 0x250c +#define SPR_SINGLE_STEP_EN_2_1 0x240c +#define SPR_SINGLE_STEP_EN_2_2 0x230c +#define SPR_SYSTEM_SAVE_0_0 0x2582 +#define SPR_SYSTEM_SAVE_0_1 0x2583 +#define SPR_SYSTEM_SAVE_0_2 0x2584 +#define SPR_SYSTEM_SAVE_0_3 0x2585 +#define SPR_SYSTEM_SAVE_1_0 0x2482 +#define SPR_SYSTEM_SAVE_1_1 0x2483 +#define SPR_SYSTEM_SAVE_1_2 0x2484 +#define SPR_SYSTEM_SAVE_1_3 0x2485 +#define SPR_SYSTEM_SAVE_2_0 0x2382 +#define SPR_SYSTEM_SAVE_2_1 0x2383 +#define SPR_SYSTEM_SAVE_2_2 0x2384 +#define SPR_SYSTEM_SAVE_2_3 0x2385 +#define SPR_TILE_COORD 0x270b 
+#define SPR_TILE_RTF_HWM 0x270c
+#define SPR_TILE_TIMER_CONTROL 0x1605
+#define SPR_UDN_AVAIL_EN 0x1b05
+#define SPR_UDN_DATA_AVAIL 0x0b80
+#define SPR_UDN_DEADLOCK_TIMEOUT 0x1906
+#define SPR_UDN_DEMUX_COUNT_0 0x0b05
+#define SPR_UDN_DEMUX_COUNT_1 0x0b06
+#define SPR_UDN_DEMUX_COUNT_2 0x0b07
+#define SPR_UDN_DEMUX_COUNT_3 0x0b08
+#define SPR_UDN_DIRECTION_PROTECT 0x1505
+#define SPR_UDN_PENDING 0x0b0a
+#define SPR_WATCH_MASK 0x200a
+#define SPR_WATCH_VAL 0x200b
+
+#endif /* !defined(__ARCH_SPR_DEF_64_H__) */
+
+#endif /* !defined(__DOXYGEN__) */
diff --git a/qemu/target-tilegx/translate.c b/qemu/target-tilegx/translate.c
new file mode 100644
index 000000000..03918ebd5
--- /dev/null
+++ b/qemu/target-tilegx/translate.c
@@ -0,0 +1,2451 @@
+/*
+ * QEMU TILE-Gx CPU
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "exec/log.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "linux-user/syscall_defs.h"
+
+#include "opcode_tilegx.h"
+#include "spr_def_64.h"
+
+#define FMT64X "%016" PRIx64
+
+static TCGv_env cpu_env;
+static TCGv cpu_pc;
+static TCGv cpu_regs[TILEGX_R_COUNT];
+
+static const char * const reg_names[64] = {
+     "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
+     "r8",  "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
+    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
+    "r48", "r49", "r50", "r51",  "bp",  "tp",  "sp",  "lr",
+     "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
+};
+
+/* Modified registers are cached in temporaries until the end of the bundle. */
+typedef struct {
+    unsigned reg;
+    TCGv val;
+} DisasContextTemp;
+
+#define MAX_WRITEBACK 4
+
+/* This is the state at translation time. */
+typedef struct {
+    uint64_t pc;          /* Current pc */
+
+    TCGv zero;            /* For zero register */
+
+    DisasContextTemp wb[MAX_WRITEBACK];
+    int num_wb;
+    int mmuidx;
+    bool exit_tb;
+    TileExcp atomic_excp;
+
+    struct {
+        TCGCond cond;     /* branch condition */
+        TCGv dest;        /* branch destination */
+        TCGv val1;        /* value to be compared against zero, for cond */
+    } jmp;                /* Jump object, only once in each TB block */
+} DisasContext;
+
+#include "exec/gen-icount.h"
+
+/* Differentiate the various pipe encodings. */
+#define TY_X0 0
+#define TY_X1 1
+#define TY_Y0 2
+#define TY_Y1 3
+
+/* Remerge the base opcode and extension fields for switching.
+   The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
+   Y2 opcode field is 2 bits. */
+#define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)
+
+/* Similar, but for Y2 only. */
+#define OEY2(OP, MODE) (OP + MODE * 4)
+
+/* Similar, but make sure opcode names match up.
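+   E.g. OE_RR_X1(NOP) expands to OE(RRR_0_OPCODE_X1, NOP_UNARY_OPCODE_X1, X1),
+   so a single switch value identifies both the base opcode and its extension.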
*/ +#define OE_RR_X0(E) OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0) +#define OE_RR_X1(E) OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1) +#define OE_RR_Y0(E) OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0) +#define OE_RR_Y1(E) OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1) +#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY) +#define OE_IM(E,XY) OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY) +#define OE_SH(E,XY) OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY) + +#define V1_IMM(X) (((X) & 0xff) * 0x0101010101010101ull) +#define V2_IMM(X) (((X) & 0xffff) * 0x0001000100010001ull) + + +static void gen_exception(DisasContext *dc, TileExcp num) +{ + TCGv_i32 tmp; + + tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES); + + tmp = tcg_const_i32(num); + gen_helper_exception(cpu_env, tmp); + tcg_temp_free_i32(tmp); + dc->exit_tb = true; +} + +static bool check_gr(DisasContext *dc, uint8_t reg) +{ + if (likely(reg < TILEGX_R_COUNT)) { + return true; + } + + switch (reg) { + case TILEGX_R_SN: + case TILEGX_R_ZERO: + break; + case TILEGX_R_IDN0: + case TILEGX_R_IDN1: + gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS); + break; + case TILEGX_R_UDN0: + case TILEGX_R_UDN1: + case TILEGX_R_UDN2: + case TILEGX_R_UDN3: + gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS); + break; + default: + g_assert_not_reached(); + } + return false; +} + +static TCGv load_zero(DisasContext *dc) +{ + if (TCGV_IS_UNUSED_I64(dc->zero)) { + dc->zero = tcg_const_i64(0); + } + return dc->zero; +} + +static TCGv load_gr(DisasContext *dc, unsigned reg) +{ + if (check_gr(dc, reg)) { + return cpu_regs[reg]; + } + return load_zero(dc); +} + +static TCGv dest_gr(DisasContext *dc, unsigned reg) +{ + int n; + + /* Skip the result, mark the exception if necessary, and continue */ + check_gr(dc, reg); + + n = dc->num_wb++; + dc->wb[n].reg = reg; + return dc->wb[n].val = tcg_temp_new_i64(); +} + +static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb, + void (*operate)(TCGv, TCGv, TCGv)) +{ + TCGv t0 = tcg_temp_new(); + + tcg_gen_ext32s_tl(tdest, tsrca); + tcg_gen_ext32s_tl(t0, tsrcb); + operate(tdest, tdest, t0); + + tcg_gen_movi_tl(t0, 0x7fffffff); + tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest); + tcg_gen_movi_tl(t0, -0x80000000LL); + tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest); + + tcg_temp_free(t0); +} + +static void gen_atomic_excp(DisasContext *dc, unsigned dest, TCGv tdest, + TCGv tsrca, TCGv tsrcb, TileExcp excp) +{ +#ifdef CONFIG_USER_ONLY + TCGv_i32 t; + + tcg_gen_st_tl(tsrca, cpu_env, offsetof(CPUTLGState, atomic_srca)); + tcg_gen_st_tl(tsrcb, cpu_env, offsetof(CPUTLGState, atomic_srcb)); + t = tcg_const_i32(dest); + tcg_gen_st_i32(t, cpu_env, offsetof(CPUTLGState, atomic_dstr)); + tcg_temp_free_i32(t); + + /* We're going to write the real result in the exception. But in + the meantime we've already created a writeback register, and + we don't want that to remain uninitialized. */ + tcg_gen_movi_tl(tdest, 0); + + /* Note that we need to delay issuing the exception that implements + the atomic operation until after writing back the results of the + instruction occupying the X0 pipe. */ + dc->atomic_excp = excp; +#else + gen_exception(dc, TILEGX_EXCP_OPCODE_UNIMPLEMENTED); +#endif +} + +/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes + specified by the bottom 3 bits of TSRCB, and set TDEST to the + low 64 bits of the resulting value. 
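+   E.g. when the low three bits of TSRCB are 2, the result is
+   (TSRCD >> 16) | (TSRCA << 48).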
*/ +static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb) +{ + TCGv t0 = tcg_temp_new(); + + tcg_gen_andi_tl(t0, tsrcb, 7); + tcg_gen_shli_tl(t0, t0, 3); + tcg_gen_shr_tl(tdest, tsrcd, t0); + + /* We want to do "t0 = tsrca << (64 - t0)". Two's complement + arithmetic on a 6-bit field tells us that 64 - t0 is equal + to (t0 ^ 63) + 1. So we can do the shift in two parts, + neither of which will be an invalid shift by 64. */ + tcg_gen_xori_tl(t0, t0, 63); + tcg_gen_shl_tl(t0, tsrca, t0); + tcg_gen_shli_tl(t0, t0, 1); + tcg_gen_or_tl(tdest, tdest, t0); + + tcg_temp_free(t0); +} + +/* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the + right shift is an immediate. */ +static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr) +{ + TCGv t0 = tcg_temp_new(); + + tcg_gen_shri_tl(t0, tsrcb, shr); + tcg_gen_shli_tl(tdest, tsrca, 64 - shr); + tcg_gen_or_tl(tdest, tdest, t0); + + tcg_temp_free(t0); +} + +typedef enum { + LU, LS, HU, HS +} MulHalf; + +static void gen_ext_half(TCGv d, TCGv s, MulHalf h) +{ + switch (h) { + case LU: + tcg_gen_ext32u_tl(d, s); + break; + case LS: + tcg_gen_ext32s_tl(d, s); + break; + case HU: + tcg_gen_shri_tl(d, s, 32); + break; + case HS: + tcg_gen_sari_tl(d, s, 32); + break; + } +} + +static void gen_mul_half(TCGv tdest, TCGv tsrca, TCGv tsrcb, + MulHalf ha, MulHalf hb) +{ + TCGv t = tcg_temp_new(); + gen_ext_half(t, tsrca, ha); + gen_ext_half(tdest, tsrcb, hb); + tcg_gen_mul_tl(tdest, tdest, t); + tcg_temp_free(t); +} + +static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd) +{ + TCGv_i32 tsh = tcg_const_i32(sh); + TCGv_i32 trd = tcg_const_i32(rd); + gen_helper_cmul2(tdest, tsrca, tsrcb, tsh, trd); + tcg_temp_free_i32(tsh); + tcg_temp_free_i32(trd); +} + +static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca, + unsigned srcb, TCGMemOp memop, const char *name) +{ + if (dest) { + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + + tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca), + dc->mmuidx, memop); + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name, + reg_names[srca], reg_names[srcb]); + return TILEGX_EXCP_NONE; +} + +static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb, + int imm, TCGMemOp memop, const char *name) +{ + TCGv tsrca = load_gr(dc, srca); + TCGv tsrcb = load_gr(dc, srcb); + + tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop); + tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm); + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name, + reg_names[srca], reg_names[srcb], imm); + return TILEGX_EXCP_NONE; +} + +/* Equality comparison with zero can be done quickly and efficiently. */ +static void gen_v1cmpeq0(TCGv v) +{ + TCGv m = tcg_const_tl(V1_IMM(0x7f)); + TCGv c = tcg_temp_new(); + + /* ~(((v & m) + m) | m | v). Sets the msb for each byte == 0. */ + tcg_gen_and_tl(c, v, m); + tcg_gen_add_tl(c, c, m); + tcg_gen_or_tl(c, c, m); + tcg_gen_nor_tl(c, c, v); + tcg_temp_free(m); + + /* Shift the msb down to form the lsb boolean result. */ + tcg_gen_shri_tl(v, c, 7); + tcg_temp_free(c); +} + +static void gen_v1cmpne0(TCGv v) +{ + TCGv m = tcg_const_tl(V1_IMM(0x7f)); + TCGv c = tcg_temp_new(); + + /* (((v & m) + m) | v) & ~m. Sets the msb for each byte != 0. */ + tcg_gen_and_tl(c, v, m); + tcg_gen_add_tl(c, c, m); + tcg_gen_or_tl(c, c, v); + tcg_gen_andc_tl(c, c, m); + tcg_temp_free(m); + + /* Shift the msb down to form the lsb boolean result. 
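+       E.g. an input byte 0x01 computes ((0x01 & 0x7f) + 0x7f) | 0x01 = 0x81,
+       whose msb survives the andc, so the lane becomes 1; a 0x00 byte
+       yields 0x7f, msb clear, so the lane becomes 0.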
*/ + tcg_gen_shri_tl(v, c, 7); + tcg_temp_free(c); +} + +/* Vector addition can be performed via arithmetic plus masking. It is + efficient this way only for 4 or more elements. */ +static void gen_v12add(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign) +{ + TCGv tmask = tcg_const_tl(~sign); + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + /* ((a & ~sign) + (b & ~sign)) ^ ((a ^ b) & sign). */ + tcg_gen_and_tl(t0, tsrca, tmask); + tcg_gen_and_tl(t1, tsrcb, tmask); + tcg_gen_add_tl(tdest, t0, t1); + tcg_gen_xor_tl(t0, tsrca, tsrcb); + tcg_gen_andc_tl(t0, t0, tmask); + tcg_gen_xor_tl(tdest, tdest, t0); + + tcg_temp_free(t1); + tcg_temp_free(t0); + tcg_temp_free(tmask); +} + +/* Similarly for vector subtraction. */ +static void gen_v12sub(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign) +{ + TCGv tsign = tcg_const_tl(sign); + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + /* ((a | sign) - (b & ~sign)) ^ ((a ^ ~b) & sign). */ + tcg_gen_or_tl(t0, tsrca, tsign); + tcg_gen_andc_tl(t1, tsrcb, tsign); + tcg_gen_sub_tl(tdest, t0, t1); + tcg_gen_eqv_tl(t0, tsrca, tsrcb); + tcg_gen_and_tl(t0, t0, tsign); + tcg_gen_xor_tl(tdest, tdest, t0); + + tcg_temp_free(t1); + tcg_temp_free(t0); + tcg_temp_free(tsign); +} + +static void gen_v4sh(TCGv d64, TCGv a64, TCGv b64, + void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 al = tcg_temp_new_i32(); + TCGv_i32 ah = tcg_temp_new_i32(); + TCGv_i32 bl = tcg_temp_new_i32(); + + tcg_gen_extr_i64_i32(al, ah, a64); + tcg_gen_extrl_i64_i32(bl, b64); + tcg_gen_andi_i32(bl, bl, 31); + generate(al, al, bl); + generate(ah, ah, bl); + tcg_gen_concat_i32_i64(d64, al, ah); + + tcg_temp_free_i32(al); + tcg_temp_free_i32(ah); + tcg_temp_free_i32(bl); +} + +static void gen_v4op(TCGv d64, TCGv a64, TCGv b64, + void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 al = tcg_temp_new_i32(); + TCGv_i32 ah = tcg_temp_new_i32(); + TCGv_i32 bl = tcg_temp_new_i32(); + TCGv_i32 bh = tcg_temp_new_i32(); + + tcg_gen_extr_i64_i32(al, ah, a64); + tcg_gen_extr_i64_i32(bl, bh, b64); + generate(al, al, bl); + generate(ah, ah, bh); + tcg_gen_concat_i32_i64(d64, al, ah); + + tcg_temp_free_i32(al); + tcg_temp_free_i32(ah); + tcg_temp_free_i32(bl); + tcg_temp_free_i32(bh); +} + +static TileExcp gen_signal(DisasContext *dc, int signo, int sigcode, + const char *mnemonic) +{ + TCGv_i32 t0 = tcg_const_i32(signo); + TCGv_i32 t1 = tcg_const_i32(sigcode); + + tcg_gen_st_i32(t0, cpu_env, offsetof(CPUTLGState, signo)); + tcg_gen_st_i32(t1, cpu_env, offsetof(CPUTLGState, sigcode)); + + tcg_temp_free_i32(t1); + tcg_temp_free_i32(t0); + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic); + return TILEGX_EXCP_SIGNAL; +} + +static bool parse_from_addli(uint64_t bundle, int *signo, int *sigcode) +{ + int imm; + + if ((get_Opcode_X0(bundle) != ADDLI_OPCODE_X0) + || (get_Dest_X0(bundle) != TILEGX_R_ZERO) + || (get_SrcA_X0(bundle) != TILEGX_R_ZERO)) { + return false; + } + + imm = get_Imm16_X0(bundle); + *signo = imm & 0x3f; + *sigcode = (imm >> 6) & 0xf; + + /* ??? The linux kernel validates both signo and the sigcode vs the + known max for each signal. Don't bother here. 
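+       (The "raise" pseudo-op encodes them as the Imm16 of an
+       "addli zero, zero, imm" bundle: signo in bits 0-5, sigcode in
+       bits 6-9, exactly as unpacked above.)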
*/ + return true; +} + +static TileExcp gen_specill(DisasContext *dc, unsigned dest, unsigned srca, + uint64_t bundle) +{ + const char *mnemonic; + int signo; + int sigcode; + + if (dest == 0x1c && srca == 0x25) { + signo = TARGET_SIGTRAP; + sigcode = TARGET_TRAP_BRKPT; + mnemonic = "bpt"; + } else if (dest == 0x1d && srca == 0x25 + && parse_from_addli(bundle, &signo, &sigcode)) { + mnemonic = "raise"; + } else { + signo = TARGET_SIGILL; + sigcode = TARGET_ILL_ILLOPC; + mnemonic = "ill"; + } + + return gen_signal(dc, signo, sigcode, mnemonic); +} + +static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext, + unsigned dest, unsigned srca, uint64_t bundle) +{ + TCGv tdest, tsrca; + const char *mnemonic; + TCGMemOp memop; + TileExcp ret = TILEGX_EXCP_NONE; + bool prefetch_nofault = false; + + /* Eliminate instructions with no output before doing anything else. */ + switch (opext) { + case OE_RR_Y0(NOP): + case OE_RR_Y1(NOP): + case OE_RR_X0(NOP): + case OE_RR_X1(NOP): + mnemonic = "nop"; + goto done0; + case OE_RR_Y0(FNOP): + case OE_RR_Y1(FNOP): + case OE_RR_X0(FNOP): + case OE_RR_X1(FNOP): + mnemonic = "fnop"; + goto done0; + case OE_RR_X1(DRAIN): + mnemonic = "drain"; + goto done0; + case OE_RR_X1(FLUSHWB): + mnemonic = "flushwb"; + goto done0; + case OE_RR_X1(ILL): + return gen_specill(dc, dest, srca, bundle); + case OE_RR_Y1(ILL): + return gen_signal(dc, TARGET_SIGILL, TARGET_ILL_ILLOPC, "ill"); + case OE_RR_X1(MF): + mnemonic = "mf"; + goto done0; + case OE_RR_X1(NAP): + /* ??? This should yield, especially in system mode. */ + mnemonic = "nap"; + goto done0; + case OE_RR_X1(IRET): + gen_helper_ext01_ics(cpu_env); + dc->jmp.cond = TCG_COND_ALWAYS; + dc->jmp.dest = tcg_temp_new(); + tcg_gen_ld_tl(dc->jmp.dest, cpu_env, + offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0])); + tcg_gen_andi_tl(dc->jmp.dest, dc->jmp.dest, ~7); + mnemonic = "iret"; + goto done0; + case OE_RR_X1(SWINT0): + case OE_RR_X1(SWINT2): + case OE_RR_X1(SWINT3): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RR_X1(SWINT1): + ret = TILEGX_EXCP_SYSCALL; + mnemonic = "swint1"; + done0: + if (srca || dest) { + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic); + return ret; + + case OE_RR_X1(DTLBPR): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RR_X1(FINV): + mnemonic = "finv"; + goto done1; + case OE_RR_X1(FLUSH): + mnemonic = "flush"; + goto done1; + case OE_RR_X1(ICOH): + mnemonic = "icoh"; + goto done1; + case OE_RR_X1(INV): + mnemonic = "inv"; + goto done1; + case OE_RR_X1(WH64): + mnemonic = "wh64"; + goto done1; + case OE_RR_X1(JRP): + case OE_RR_Y1(JRP): + mnemonic = "jrp"; + goto do_jr; + case OE_RR_X1(JR): + case OE_RR_Y1(JR): + mnemonic = "jr"; + goto do_jr; + case OE_RR_X1(JALRP): + case OE_RR_Y1(JALRP): + mnemonic = "jalrp"; + goto do_jalr; + case OE_RR_X1(JALR): + case OE_RR_Y1(JALR): + mnemonic = "jalr"; + do_jalr: + tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR), + dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES); + do_jr: + dc->jmp.cond = TCG_COND_ALWAYS; + dc->jmp.dest = tcg_temp_new(); + tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7); + done1: + if (dest) { + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]); + return ret; + } + + tdest = dest_gr(dc, dest); + tsrca = load_gr(dc, srca); + + switch (opext) { + case OE_RR_X0(CNTLZ): + case OE_RR_Y0(CNTLZ): + gen_helper_cntlz(tdest, tsrca); + mnemonic = "cntlz"; + break; + case OE_RR_X0(CNTTZ): + case OE_RR_Y0(CNTTZ): + gen_helper_cnttz(tdest, 
tsrca); + mnemonic = "cnttz"; + break; + case OE_RR_X0(FSINGLE_PACK1): + case OE_RR_Y0(FSINGLE_PACK1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RR_X1(LD1S): + memop = MO_SB; + mnemonic = "ld1s"; /* prefetch_l1_fault */ + goto do_load; + case OE_RR_X1(LD1U): + memop = MO_UB; + mnemonic = "ld1u"; /* prefetch, prefetch_l1 */ + prefetch_nofault = (dest == TILEGX_R_ZERO); + goto do_load; + case OE_RR_X1(LD2S): + memop = MO_TESW; + mnemonic = "ld2s"; /* prefetch_l2_fault */ + goto do_load; + case OE_RR_X1(LD2U): + memop = MO_TEUW; + mnemonic = "ld2u"; /* prefetch_l2 */ + prefetch_nofault = (dest == TILEGX_R_ZERO); + goto do_load; + case OE_RR_X1(LD4S): + memop = MO_TESL; + mnemonic = "ld4s"; /* prefetch_l3_fault */ + goto do_load; + case OE_RR_X1(LD4U): + memop = MO_TEUL; + mnemonic = "ld4u"; /* prefetch_l3 */ + prefetch_nofault = (dest == TILEGX_R_ZERO); + goto do_load; + case OE_RR_X1(LDNT1S): + memop = MO_SB; + mnemonic = "ldnt1s"; + goto do_load; + case OE_RR_X1(LDNT1U): + memop = MO_UB; + mnemonic = "ldnt1u"; + goto do_load; + case OE_RR_X1(LDNT2S): + memop = MO_TESW; + mnemonic = "ldnt2s"; + goto do_load; + case OE_RR_X1(LDNT2U): + memop = MO_TEUW; + mnemonic = "ldnt2u"; + goto do_load; + case OE_RR_X1(LDNT4S): + memop = MO_TESL; + mnemonic = "ldnt4s"; + goto do_load; + case OE_RR_X1(LDNT4U): + memop = MO_TEUL; + mnemonic = "ldnt4u"; + goto do_load; + case OE_RR_X1(LDNT): + memop = MO_TEQ; + mnemonic = "ldnt"; + goto do_load; + case OE_RR_X1(LD): + memop = MO_TEQ; + mnemonic = "ld"; + do_load: + if (!prefetch_nofault) { + tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop); + } + break; + case OE_RR_X1(LDNA): + tcg_gen_andi_tl(tdest, tsrca, ~7); + tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ); + mnemonic = "ldna"; + break; + case OE_RR_X1(LNK): + case OE_RR_Y1(LNK): + if (srca) { + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES); + mnemonic = "lnk"; + break; + case OE_RR_X0(PCNT): + case OE_RR_Y0(PCNT): + gen_helper_pcnt(tdest, tsrca); + mnemonic = "pcnt"; + break; + case OE_RR_X0(REVBITS): + case OE_RR_Y0(REVBITS): + gen_helper_revbits(tdest, tsrca); + mnemonic = "revbits"; + break; + case OE_RR_X0(REVBYTES): + case OE_RR_Y0(REVBYTES): + tcg_gen_bswap64_tl(tdest, tsrca); + mnemonic = "revbytes"; + break; + case OE_RR_X0(TBLIDXB0): + case OE_RR_Y0(TBLIDXB0): + tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tsrca, 2, 8); + mnemonic = "tblidxb0"; + break; + case OE_RR_X0(TBLIDXB1): + case OE_RR_Y0(TBLIDXB1): + tcg_gen_shri_tl(tdest, tsrca, 8); + tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8); + mnemonic = "tblidxb1"; + break; + case OE_RR_X0(TBLIDXB2): + case OE_RR_Y0(TBLIDXB2): + tcg_gen_shri_tl(tdest, tsrca, 16); + tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8); + mnemonic = "tblidxb2"; + break; + case OE_RR_X0(TBLIDXB3): + case OE_RR_Y0(TBLIDXB3): + tcg_gen_shri_tl(tdest, tsrca, 24); + tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8); + mnemonic = "tblidxb3"; + break; + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic, + reg_names[dest], reg_names[srca]); + return ret; +} + +static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext, + unsigned dest, unsigned srca, unsigned srcb) +{ + TCGv tdest = dest_gr(dc, dest); + TCGv tsrca = load_gr(dc, srca); + TCGv tsrcb = load_gr(dc, srcb); + TCGv t0; + const char *mnemonic; + + switch (opext) { + case OE_RRR(ADDXSC, 0, X0): + case OE_RRR(ADDXSC, 0, X1): + gen_saturate_op(tdest, 
tsrca, tsrcb, tcg_gen_add_tl); + mnemonic = "addxsc"; + break; + case OE_RRR(ADDX, 0, X0): + case OE_RRR(ADDX, 0, X1): + case OE_RRR(ADDX, 0, Y0): + case OE_RRR(ADDX, 0, Y1): + tcg_gen_add_tl(tdest, tsrca, tsrcb); + tcg_gen_ext32s_tl(tdest, tdest); + mnemonic = "addx"; + break; + case OE_RRR(ADD, 0, X0): + case OE_RRR(ADD, 0, X1): + case OE_RRR(ADD, 0, Y0): + case OE_RRR(ADD, 0, Y1): + tcg_gen_add_tl(tdest, tsrca, tsrcb); + mnemonic = "add"; + break; + case OE_RRR(AND, 0, X0): + case OE_RRR(AND, 0, X1): + case OE_RRR(AND, 5, Y0): + case OE_RRR(AND, 5, Y1): + tcg_gen_and_tl(tdest, tsrca, tsrcb); + mnemonic = "and"; + break; + case OE_RRR(CMOVEQZ, 0, X0): + case OE_RRR(CMOVEQZ, 4, Y0): + tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, load_zero(dc), + tsrcb, load_gr(dc, dest)); + mnemonic = "cmoveqz"; + break; + case OE_RRR(CMOVNEZ, 0, X0): + case OE_RRR(CMOVNEZ, 4, Y0): + tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, load_zero(dc), + tsrcb, load_gr(dc, dest)); + mnemonic = "cmovnez"; + break; + case OE_RRR(CMPEQ, 0, X0): + case OE_RRR(CMPEQ, 0, X1): + case OE_RRR(CMPEQ, 3, Y0): + case OE_RRR(CMPEQ, 3, Y1): + tcg_gen_setcond_tl(TCG_COND_EQ, tdest, tsrca, tsrcb); + mnemonic = "cmpeq"; + break; + case OE_RRR(CMPEXCH4, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_CMPEXCH4); + mnemonic = "cmpexch4"; + break; + case OE_RRR(CMPEXCH, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_CMPEXCH); + mnemonic = "cmpexch"; + break; + case OE_RRR(CMPLES, 0, X0): + case OE_RRR(CMPLES, 0, X1): + case OE_RRR(CMPLES, 2, Y0): + case OE_RRR(CMPLES, 2, Y1): + tcg_gen_setcond_tl(TCG_COND_LE, tdest, tsrca, tsrcb); + mnemonic = "cmples"; + break; + case OE_RRR(CMPLEU, 0, X0): + case OE_RRR(CMPLEU, 0, X1): + case OE_RRR(CMPLEU, 2, Y0): + case OE_RRR(CMPLEU, 2, Y1): + tcg_gen_setcond_tl(TCG_COND_LEU, tdest, tsrca, tsrcb); + mnemonic = "cmpleu"; + break; + case OE_RRR(CMPLTS, 0, X0): + case OE_RRR(CMPLTS, 0, X1): + case OE_RRR(CMPLTS, 2, Y0): + case OE_RRR(CMPLTS, 2, Y1): + tcg_gen_setcond_tl(TCG_COND_LT, tdest, tsrca, tsrcb); + mnemonic = "cmplts"; + break; + case OE_RRR(CMPLTU, 0, X0): + case OE_RRR(CMPLTU, 0, X1): + case OE_RRR(CMPLTU, 2, Y0): + case OE_RRR(CMPLTU, 2, Y1): + tcg_gen_setcond_tl(TCG_COND_LTU, tdest, tsrca, tsrcb); + mnemonic = "cmpltu"; + break; + case OE_RRR(CMPNE, 0, X0): + case OE_RRR(CMPNE, 0, X1): + case OE_RRR(CMPNE, 3, Y0): + case OE_RRR(CMPNE, 3, Y1): + tcg_gen_setcond_tl(TCG_COND_NE, tdest, tsrca, tsrcb); + mnemonic = "cmpne"; + break; + case OE_RRR(CMULAF, 0, X0): + gen_helper_cmulaf(tdest, load_gr(dc, dest), tsrca, tsrcb); + mnemonic = "cmulaf"; + break; + case OE_RRR(CMULA, 0, X0): + gen_helper_cmula(tdest, load_gr(dc, dest), tsrca, tsrcb); + mnemonic = "cmula"; + break; + case OE_RRR(CMULFR, 0, X0): + gen_cmul2(tdest, tsrca, tsrcb, 15, 1 << 14); + mnemonic = "cmulfr"; + break; + case OE_RRR(CMULF, 0, X0): + gen_cmul2(tdest, tsrca, tsrcb, 15, 0); + mnemonic = "cmulf"; + break; + case OE_RRR(CMULHR, 0, X0): + gen_cmul2(tdest, tsrca, tsrcb, 16, 1 << 15); + mnemonic = "cmulhr"; + break; + case OE_RRR(CMULH, 0, X0): + gen_cmul2(tdest, tsrca, tsrcb, 16, 0); + mnemonic = "cmulh"; + break; + case OE_RRR(CMUL, 0, X0): + gen_helper_cmula(tdest, load_zero(dc), tsrca, tsrcb); + mnemonic = "cmul"; + break; + case OE_RRR(CRC32_32, 0, X0): + gen_helper_crc32_32(tdest, tsrca, tsrcb); + mnemonic = "crc32_32"; + break; + case OE_RRR(CRC32_8, 0, X0): + gen_helper_crc32_8(tdest, tsrca, tsrcb); + mnemonic = "crc32_8"; + break; + case OE_RRR(DBLALIGN2, 
0, X0): + case OE_RRR(DBLALIGN2, 0, X1): + gen_dblaligni(tdest, tsrca, tsrcb, 16); + mnemonic = "dblalign2"; + break; + case OE_RRR(DBLALIGN4, 0, X0): + case OE_RRR(DBLALIGN4, 0, X1): + gen_dblaligni(tdest, tsrca, tsrcb, 32); + mnemonic = "dblalign4"; + break; + case OE_RRR(DBLALIGN6, 0, X0): + case OE_RRR(DBLALIGN6, 0, X1): + gen_dblaligni(tdest, tsrca, tsrcb, 48); + mnemonic = "dblalign6"; + break; + case OE_RRR(DBLALIGN, 0, X0): + gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb); + mnemonic = "dblalign"; + break; + case OE_RRR(EXCH4, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_EXCH4); + mnemonic = "exch4"; + break; + case OE_RRR(EXCH, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_EXCH); + mnemonic = "exch"; + break; + case OE_RRR(FDOUBLE_ADDSUB, 0, X0): + case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0): + case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0): + case OE_RRR(FDOUBLE_PACK1, 0, X0): + case OE_RRR(FDOUBLE_PACK2, 0, X0): + case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0): + case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0): + case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(FETCHADD4, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHADD4); + mnemonic = "fetchadd4"; + break; + case OE_RRR(FETCHADDGEZ4, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHADDGEZ4); + mnemonic = "fetchaddgez4"; + break; + case OE_RRR(FETCHADDGEZ, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHADDGEZ); + mnemonic = "fetchaddgez"; + break; + case OE_RRR(FETCHADD, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHADD); + mnemonic = "fetchadd"; + break; + case OE_RRR(FETCHAND4, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHAND4); + mnemonic = "fetchand4"; + break; + case OE_RRR(FETCHAND, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHAND); + mnemonic = "fetchand"; + break; + case OE_RRR(FETCHOR4, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHOR4); + mnemonic = "fetchor4"; + break; + case OE_RRR(FETCHOR, 0, X1): + gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb, + TILEGX_EXCP_OPCODE_FETCHOR); + mnemonic = "fetchor"; + break; + case OE_RRR(FSINGLE_ADD1, 0, X0): + case OE_RRR(FSINGLE_ADDSUB2, 0, X0): + case OE_RRR(FSINGLE_MUL1, 0, X0): + case OE_RRR(FSINGLE_MUL2, 0, X0): + case OE_RRR(FSINGLE_PACK2, 0, X0): + case OE_RRR(FSINGLE_SUB1, 0, X0): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(MNZ, 0, X0): + case OE_RRR(MNZ, 0, X1): + case OE_RRR(MNZ, 4, Y0): + case OE_RRR(MNZ, 4, Y1): + t0 = load_zero(dc); + tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, t0, tsrcb, t0); + mnemonic = "mnz"; + break; + case OE_RRR(MULAX, 0, X0): + case OE_RRR(MULAX, 3, Y0): + tcg_gen_mul_tl(tdest, tsrca, tsrcb); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + tcg_gen_ext32s_tl(tdest, tdest); + mnemonic = "mulax"; + break; + case OE_RRR(MULA_HS_HS, 0, X0): + case OE_RRR(MULA_HS_HS, 9, Y0): + gen_mul_half(tdest, tsrca, tsrcb, HS, HS); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_hs_hs"; + break; + case OE_RRR(MULA_HS_HU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HS, HU); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_hs_hu"; + break; + case OE_RRR(MULA_HS_LS, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HS, LS); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, 
dest)); + mnemonic = "mula_hs_ls"; + break; + case OE_RRR(MULA_HS_LU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HS, LU); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_hs_lu"; + break; + case OE_RRR(MULA_HU_HU, 0, X0): + case OE_RRR(MULA_HU_HU, 9, Y0): + gen_mul_half(tdest, tsrca, tsrcb, HU, HU); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_hu_hu"; + break; + case OE_RRR(MULA_HU_LS, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HU, LS); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_hu_ls"; + break; + case OE_RRR(MULA_HU_LU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HU, LU); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_hu_lu"; + break; + case OE_RRR(MULA_LS_LS, 0, X0): + case OE_RRR(MULA_LS_LS, 9, Y0): + gen_mul_half(tdest, tsrca, tsrcb, LS, LS); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_ls_ls"; + break; + case OE_RRR(MULA_LS_LU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, LS, LU); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_ls_lu"; + break; + case OE_RRR(MULA_LU_LU, 0, X0): + case OE_RRR(MULA_LU_LU, 9, Y0): + gen_mul_half(tdest, tsrca, tsrcb, LU, LU); + tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest)); + mnemonic = "mula_lu_lu"; + break; + case OE_RRR(MULX, 0, X0): + case OE_RRR(MULX, 3, Y0): + tcg_gen_mul_tl(tdest, tsrca, tsrcb); + tcg_gen_ext32s_tl(tdest, tdest); + mnemonic = "mulx"; + break; + case OE_RRR(MUL_HS_HS, 0, X0): + case OE_RRR(MUL_HS_HS, 8, Y0): + gen_mul_half(tdest, tsrca, tsrcb, HS, HS); + mnemonic = "mul_hs_hs"; + break; + case OE_RRR(MUL_HS_HU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HS, HU); + mnemonic = "mul_hs_hu"; + break; + case OE_RRR(MUL_HS_LS, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HS, LS); + mnemonic = "mul_hs_ls"; + break; + case OE_RRR(MUL_HS_LU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HS, LU); + mnemonic = "mul_hs_lu"; + break; + case OE_RRR(MUL_HU_HU, 0, X0): + case OE_RRR(MUL_HU_HU, 8, Y0): + gen_mul_half(tdest, tsrca, tsrcb, HU, HU); + mnemonic = "mul_hu_hu"; + break; + case OE_RRR(MUL_HU_LS, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HU, LS); + mnemonic = "mul_hu_ls"; + break; + case OE_RRR(MUL_HU_LU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, HU, LU); + mnemonic = "mul_hu_lu"; + break; + case OE_RRR(MUL_LS_LS, 0, X0): + case OE_RRR(MUL_LS_LS, 8, Y0): + gen_mul_half(tdest, tsrca, tsrcb, LS, LS); + mnemonic = "mul_ls_ls"; + break; + case OE_RRR(MUL_LS_LU, 0, X0): + gen_mul_half(tdest, tsrca, tsrcb, LS, LU); + mnemonic = "mul_ls_lu"; + break; + case OE_RRR(MUL_LU_LU, 0, X0): + case OE_RRR(MUL_LU_LU, 8, Y0): + gen_mul_half(tdest, tsrca, tsrcb, LU, LU); + mnemonic = "mul_lu_lu"; + break; + case OE_RRR(MZ, 0, X0): + case OE_RRR(MZ, 0, X1): + case OE_RRR(MZ, 4, Y0): + case OE_RRR(MZ, 4, Y1): + t0 = load_zero(dc); + tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, t0, tsrcb, t0); + mnemonic = "mz"; + break; + case OE_RRR(NOR, 0, X0): + case OE_RRR(NOR, 0, X1): + case OE_RRR(NOR, 5, Y0): + case OE_RRR(NOR, 5, Y1): + tcg_gen_nor_tl(tdest, tsrca, tsrcb); + mnemonic = "nor"; + break; + case OE_RRR(OR, 0, X0): + case OE_RRR(OR, 0, X1): + case OE_RRR(OR, 5, Y0): + case OE_RRR(OR, 5, Y1): + tcg_gen_or_tl(tdest, tsrca, tsrcb); + mnemonic = "or"; + break; + case OE_RRR(ROTL, 0, X0): + case OE_RRR(ROTL, 0, X1): + case OE_RRR(ROTL, 6, Y0): + case OE_RRR(ROTL, 6, Y1): + tcg_gen_andi_tl(tdest, tsrcb, 63); + tcg_gen_rotl_tl(tdest, tsrca, tdest); + mnemonic = "rotl"; + break; + case OE_RRR(SHL1ADDX, 0, X0): + case 
OE_RRR(SHL1ADDX, 0, X1):
+    case OE_RRR(SHL1ADDX, 7, Y0):
+    case OE_RRR(SHL1ADDX, 7, Y1):
+        tcg_gen_shli_tl(tdest, tsrca, 1);
+        tcg_gen_add_tl(tdest, tdest, tsrcb);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        mnemonic = "shl1addx";
+        break;
+    case OE_RRR(SHL1ADD, 0, X0):
+    case OE_RRR(SHL1ADD, 0, X1):
+    case OE_RRR(SHL1ADD, 1, Y0):
+    case OE_RRR(SHL1ADD, 1, Y1):
+        tcg_gen_shli_tl(tdest, tsrca, 1);
+        tcg_gen_add_tl(tdest, tdest, tsrcb);
+        mnemonic = "shl1add";
+        break;
+    case OE_RRR(SHL2ADDX, 0, X0):
+    case OE_RRR(SHL2ADDX, 0, X1):
+    case OE_RRR(SHL2ADDX, 7, Y0):
+    case OE_RRR(SHL2ADDX, 7, Y1):
+        tcg_gen_shli_tl(tdest, tsrca, 2);
+        tcg_gen_add_tl(tdest, tdest, tsrcb);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        mnemonic = "shl2addx";
+        break;
+    case OE_RRR(SHL2ADD, 0, X0):
+    case OE_RRR(SHL2ADD, 0, X1):
+    case OE_RRR(SHL2ADD, 1, Y0):
+    case OE_RRR(SHL2ADD, 1, Y1):
+        tcg_gen_shli_tl(tdest, tsrca, 2);
+        tcg_gen_add_tl(tdest, tdest, tsrcb);
+        mnemonic = "shl2add";
+        break;
+    case OE_RRR(SHL3ADDX, 0, X0):
+    case OE_RRR(SHL3ADDX, 0, X1):
+    case OE_RRR(SHL3ADDX, 7, Y0):
+    case OE_RRR(SHL3ADDX, 7, Y1):
+        tcg_gen_shli_tl(tdest, tsrca, 3);
+        tcg_gen_add_tl(tdest, tdest, tsrcb);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        mnemonic = "shl3addx";
+        break;
+    case OE_RRR(SHL3ADD, 0, X0):
+    case OE_RRR(SHL3ADD, 0, X1):
+    case OE_RRR(SHL3ADD, 1, Y0):
+    case OE_RRR(SHL3ADD, 1, Y1):
+        tcg_gen_shli_tl(tdest, tsrca, 3);
+        tcg_gen_add_tl(tdest, tdest, tsrcb);
+        mnemonic = "shl3add";
+        break;
+    case OE_RRR(SHLX, 0, X0):
+    case OE_RRR(SHLX, 0, X1):
+        tcg_gen_andi_tl(tdest, tsrcb, 31);
+        tcg_gen_shl_tl(tdest, tsrca, tdest);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        mnemonic = "shlx";
+        break;
+    case OE_RRR(SHL, 0, X0):
+    case OE_RRR(SHL, 0, X1):
+    case OE_RRR(SHL, 6, Y0):
+    case OE_RRR(SHL, 6, Y1):
+        tcg_gen_andi_tl(tdest, tsrcb, 63);
+        tcg_gen_shl_tl(tdest, tsrca, tdest);
+        mnemonic = "shl";
+        break;
+    case OE_RRR(SHRS, 0, X0):
+    case OE_RRR(SHRS, 0, X1):
+    case OE_RRR(SHRS, 6, Y0):
+    case OE_RRR(SHRS, 6, Y1):
+        tcg_gen_andi_tl(tdest, tsrcb, 63);
+        tcg_gen_sar_tl(tdest, tsrca, tdest);
+        mnemonic = "shrs";
+        break;
+    case OE_RRR(SHRUX, 0, X0):
+    case OE_RRR(SHRUX, 0, X1):
+        t0 = tcg_temp_new();
+        tcg_gen_andi_tl(t0, tsrcb, 31);
+        tcg_gen_ext32u_tl(tdest, tsrca);
+        tcg_gen_shr_tl(tdest, tdest, t0);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        tcg_temp_free(t0);
+        mnemonic = "shrux";
+        break;
+    case OE_RRR(SHRU, 0, X0):
+    case OE_RRR(SHRU, 0, X1):
+    case OE_RRR(SHRU, 6, Y0):
+    case OE_RRR(SHRU, 6, Y1):
+        tcg_gen_andi_tl(tdest, tsrcb, 63);
+        tcg_gen_shr_tl(tdest, tsrca, tdest);
+        mnemonic = "shru";
+        break;
+    case OE_RRR(SHUFFLEBYTES, 0, X0):
+        gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrcb);
+        mnemonic = "shufflebytes";
+        break;
+    case OE_RRR(SUBXSC, 0, X0):
+    case OE_RRR(SUBXSC, 0, X1):
+        gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
+        mnemonic = "subxsc";
+        break;
+    case OE_RRR(SUBX, 0, X0):
+    case OE_RRR(SUBX, 0, X1):
+    case OE_RRR(SUBX, 0, Y0):
+    case OE_RRR(SUBX, 0, Y1):
+        tcg_gen_sub_tl(tdest, tsrca, tsrcb);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        mnemonic = "subx";
+        break;
+    case OE_RRR(SUB, 0, X0):
+    case OE_RRR(SUB, 0, X1):
+    case OE_RRR(SUB, 0, Y0):
+    case OE_RRR(SUB, 0, Y1):
+        tcg_gen_sub_tl(tdest, tsrca, tsrcb);
+        mnemonic = "sub";
+        break;
+    case OE_RRR(V1ADDUC, 0, X0):
+    case OE_RRR(V1ADDUC, 0, X1):
+        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+    case OE_RRR(V1ADD, 0, X0):
+    case OE_RRR(V1ADD, 0, X1):
+        gen_v12add(tdest, tsrca, tsrcb, V1_IMM(0x80));
+        mnemonic = "v1add";
+        break;
+    case OE_RRR(V1ADIFFU, 0, X0):
+    case
OE_RRR(V1AVGU, 0, X0): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V1CMPEQ, 0, X0): + case OE_RRR(V1CMPEQ, 0, X1): + tcg_gen_xor_tl(tdest, tsrca, tsrcb); + gen_v1cmpeq0(tdest); + mnemonic = "v1cmpeq"; + break; + case OE_RRR(V1CMPLES, 0, X0): + case OE_RRR(V1CMPLES, 0, X1): + case OE_RRR(V1CMPLEU, 0, X0): + case OE_RRR(V1CMPLEU, 0, X1): + case OE_RRR(V1CMPLTS, 0, X0): + case OE_RRR(V1CMPLTS, 0, X1): + case OE_RRR(V1CMPLTU, 0, X0): + case OE_RRR(V1CMPLTU, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V1CMPNE, 0, X0): + case OE_RRR(V1CMPNE, 0, X1): + tcg_gen_xor_tl(tdest, tsrca, tsrcb); + gen_v1cmpne0(tdest); + mnemonic = "v1cmpne"; + break; + case OE_RRR(V1DDOTPUA, 0, X0): + case OE_RRR(V1DDOTPUSA, 0, X0): + case OE_RRR(V1DDOTPUS, 0, X0): + case OE_RRR(V1DDOTPU, 0, X0): + case OE_RRR(V1DOTPA, 0, X0): + case OE_RRR(V1DOTPUA, 0, X0): + case OE_RRR(V1DOTPUSA, 0, X0): + case OE_RRR(V1DOTPUS, 0, X0): + case OE_RRR(V1DOTPU, 0, X0): + case OE_RRR(V1DOTP, 0, X0): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V1INT_H, 0, X0): + case OE_RRR(V1INT_H, 0, X1): + gen_helper_v1int_h(tdest, tsrca, tsrcb); + mnemonic = "v1int_h"; + break; + case OE_RRR(V1INT_L, 0, X0): + case OE_RRR(V1INT_L, 0, X1): + gen_helper_v1int_l(tdest, tsrca, tsrcb); + mnemonic = "v1int_l"; + break; + case OE_RRR(V1MAXU, 0, X0): + case OE_RRR(V1MAXU, 0, X1): + case OE_RRR(V1MINU, 0, X0): + case OE_RRR(V1MINU, 0, X1): + case OE_RRR(V1MNZ, 0, X0): + case OE_RRR(V1MNZ, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V1MULTU, 0, X0): + gen_helper_v1multu(tdest, tsrca, tsrcb); + mnemonic = "v1multu"; + break; + case OE_RRR(V1MULUS, 0, X0): + case OE_RRR(V1MULU, 0, X0): + case OE_RRR(V1MZ, 0, X0): + case OE_RRR(V1MZ, 0, X1): + case OE_RRR(V1SADAU, 0, X0): + case OE_RRR(V1SADU, 0, X0): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V1SHL, 0, X0): + case OE_RRR(V1SHL, 0, X1): + gen_helper_v1shl(tdest, tsrca, tsrcb); + mnemonic = "v1shl"; + break; + case OE_RRR(V1SHRS, 0, X0): + case OE_RRR(V1SHRS, 0, X1): + gen_helper_v1shrs(tdest, tsrca, tsrcb); + mnemonic = "v1shrs"; + break; + case OE_RRR(V1SHRU, 0, X0): + case OE_RRR(V1SHRU, 0, X1): + gen_helper_v1shru(tdest, tsrca, tsrcb); + mnemonic = "v1shru"; + break; + case OE_RRR(V1SUBUC, 0, X0): + case OE_RRR(V1SUBUC, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V1SUB, 0, X0): + case OE_RRR(V1SUB, 0, X1): + gen_v12sub(tdest, tsrca, tsrcb, V1_IMM(0x80)); + mnemonic = "v1sub"; + break; + case OE_RRR(V2ADDSC, 0, X0): + case OE_RRR(V2ADDSC, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V2ADD, 0, X0): + case OE_RRR(V2ADD, 0, X1): + gen_v12add(tdest, tsrca, tsrcb, V2_IMM(0x8000)); + mnemonic = "v2add"; + break; + case OE_RRR(V2ADIFFS, 0, X0): + case OE_RRR(V2AVGS, 0, X0): + case OE_RRR(V2CMPEQ, 0, X0): + case OE_RRR(V2CMPEQ, 0, X1): + case OE_RRR(V2CMPLES, 0, X0): + case OE_RRR(V2CMPLES, 0, X1): + case OE_RRR(V2CMPLEU, 0, X0): + case OE_RRR(V2CMPLEU, 0, X1): + case OE_RRR(V2CMPLTS, 0, X0): + case OE_RRR(V2CMPLTS, 0, X1): + case OE_RRR(V2CMPLTU, 0, X0): + case OE_RRR(V2CMPLTU, 0, X1): + case OE_RRR(V2CMPNE, 0, X0): + case OE_RRR(V2CMPNE, 0, X1): + case OE_RRR(V2DOTPA, 0, X0): + case OE_RRR(V2DOTP, 0, X0): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V2INT_H, 0, X0): + case OE_RRR(V2INT_H, 0, X1): + gen_helper_v2int_h(tdest, tsrca, tsrcb); + mnemonic = "v2int_h"; + break; + case OE_RRR(V2INT_L, 0, X0): + case OE_RRR(V2INT_L, 0, X1): + gen_helper_v2int_l(tdest, tsrca, tsrcb); + mnemonic 
= "v2int_l"; + break; + case OE_RRR(V2MAXS, 0, X0): + case OE_RRR(V2MAXS, 0, X1): + case OE_RRR(V2MINS, 0, X0): + case OE_RRR(V2MINS, 0, X1): + case OE_RRR(V2MNZ, 0, X0): + case OE_RRR(V2MNZ, 0, X1): + case OE_RRR(V2MULFSC, 0, X0): + case OE_RRR(V2MULS, 0, X0): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V2MULTS, 0, X0): + gen_helper_v2mults(tdest, tsrca, tsrcb); + mnemonic = "v2mults"; + break; + case OE_RRR(V2MZ, 0, X0): + case OE_RRR(V2MZ, 0, X1): + case OE_RRR(V2PACKH, 0, X0): + case OE_RRR(V2PACKH, 0, X1): + case OE_RRR(V2PACKL, 0, X0): + case OE_RRR(V2PACKL, 0, X1): + case OE_RRR(V2PACKUC, 0, X0): + case OE_RRR(V2PACKUC, 0, X1): + case OE_RRR(V2SADAS, 0, X0): + case OE_RRR(V2SADAU, 0, X0): + case OE_RRR(V2SADS, 0, X0): + case OE_RRR(V2SADU, 0, X0): + case OE_RRR(V2SHLSC, 0, X0): + case OE_RRR(V2SHLSC, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V2SHL, 0, X0): + case OE_RRR(V2SHL, 0, X1): + gen_helper_v2shl(tdest, tsrca, tsrcb); + mnemonic = "v2shl"; + break; + case OE_RRR(V2SHRS, 0, X0): + case OE_RRR(V2SHRS, 0, X1): + gen_helper_v2shrs(tdest, tsrca, tsrcb); + mnemonic = "v2shrs"; + break; + case OE_RRR(V2SHRU, 0, X0): + case OE_RRR(V2SHRU, 0, X1): + gen_helper_v2shru(tdest, tsrca, tsrcb); + mnemonic = "v2shru"; + break; + case OE_RRR(V2SUBSC, 0, X0): + case OE_RRR(V2SUBSC, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V2SUB, 0, X0): + case OE_RRR(V2SUB, 0, X1): + gen_v12sub(tdest, tsrca, tsrcb, V2_IMM(0x8000)); + mnemonic = "v2sub"; + break; + case OE_RRR(V4ADDSC, 0, X0): + case OE_RRR(V4ADDSC, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V4ADD, 0, X0): + case OE_RRR(V4ADD, 0, X1): + gen_v4op(tdest, tsrca, tsrcb, tcg_gen_add_i32); + mnemonic = "v4add"; + break; + case OE_RRR(V4INT_H, 0, X0): + case OE_RRR(V4INT_H, 0, X1): + tcg_gen_shri_tl(tdest, tsrcb, 32); + tcg_gen_deposit_tl(tdest, tsrca, tdest, 0, 32); + mnemonic = "v4int_h"; + break; + case OE_RRR(V4INT_L, 0, X0): + case OE_RRR(V4INT_L, 0, X1): + tcg_gen_deposit_tl(tdest, tsrcb, tsrca, 32, 32); + mnemonic = "v4int_l"; + break; + case OE_RRR(V4PACKSC, 0, X0): + case OE_RRR(V4PACKSC, 0, X1): + case OE_RRR(V4SHLSC, 0, X0): + case OE_RRR(V4SHLSC, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V4SHL, 0, X0): + case OE_RRR(V4SHL, 0, X1): + gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shl_i32); + mnemonic = "v4shl"; + break; + case OE_RRR(V4SHRS, 0, X0): + case OE_RRR(V4SHRS, 0, X1): + gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_sar_i32); + mnemonic = "v4shrs"; + break; + case OE_RRR(V4SHRU, 0, X0): + case OE_RRR(V4SHRU, 0, X1): + gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shr_i32); + mnemonic = "v4shru"; + break; + case OE_RRR(V4SUBSC, 0, X0): + case OE_RRR(V4SUBSC, 0, X1): + return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; + case OE_RRR(V4SUB, 0, X0): + case OE_RRR(V4SUB, 0, X1): + gen_v4op(tdest, tsrca, tsrcb, tcg_gen_sub_i32); + mnemonic = "v2sub"; + break; + case OE_RRR(XOR, 0, X0): + case OE_RRR(XOR, 0, X1): + case OE_RRR(XOR, 5, Y0): + case OE_RRR(XOR, 5, Y1): + tcg_gen_xor_tl(tdest, tsrca, tsrcb); + mnemonic = "xor"; + break; + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic, + reg_names[dest], reg_names[srca], reg_names[srcb]); + return TILEGX_EXCP_NONE; +} + +static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext, + unsigned dest, unsigned srca, int imm) +{ + TCGv tdest = dest_gr(dc, dest); + TCGv tsrca = load_gr(dc, srca); + bool prefetch_nofault = false; + const char *mnemonic; + 
TCGMemOp memop; + int i2, i3; + TCGv t0; + + switch (opext) { + case OE(ADDI_OPCODE_Y0, 0, Y0): + case OE(ADDI_OPCODE_Y1, 0, Y1): + case OE_IM(ADDI, X0): + case OE_IM(ADDI, X1): + tcg_gen_addi_tl(tdest, tsrca, imm); + mnemonic = "addi"; + break; + case OE(ADDXI_OPCODE_Y0, 0, Y0): + case OE(ADDXI_OPCODE_Y1, 0, Y1): + case OE_IM(ADDXI, X0): + case OE_IM(ADDXI, X1): + tcg_gen_addi_tl(tdest, tsrca, imm); + tcg_gen_ext32s_tl(tdest, tdest); + mnemonic = "addxi"; + break; + case OE(ANDI_OPCODE_Y0, 0, Y0): + case OE(ANDI_OPCODE_Y1, 0, Y1): + case OE_IM(ANDI, X0): + case OE_IM(ANDI, X1): + tcg_gen_andi_tl(tdest, tsrca, imm); + mnemonic = "andi"; + break; + case OE(CMPEQI_OPCODE_Y0, 0, Y0): + case OE(CMPEQI_OPCODE_Y1, 0, Y1): + case OE_IM(CMPEQI, X0): + case OE_IM(CMPEQI, X1): + tcg_gen_setcondi_tl(TCG_COND_EQ, tdest, tsrca, imm); + mnemonic = "cmpeqi"; + break; + case OE(CMPLTSI_OPCODE_Y0, 0, Y0): + case OE(CMPLTSI_OPCODE_Y1, 0, Y1): + case OE_IM(CMPLTSI, X0): + case OE_IM(CMPLTSI, X1): + tcg_gen_setcondi_tl(TCG_COND_LT, tdest, tsrca, imm); + mnemonic = "cmpltsi"; + break; + case OE_IM(CMPLTUI, X0): + case OE_IM(CMPLTUI, X1): + tcg_gen_setcondi_tl(TCG_COND_LTU, tdest, tsrca, imm); + mnemonic = "cmpltui"; + break; + case OE_IM(LD1S_ADD, X1): + memop = MO_SB; + mnemonic = "ld1s_add"; /* prefetch_add_l1_fault */ + goto do_load_add; + case OE_IM(LD1U_ADD, X1): + memop = MO_UB; + mnemonic = "ld1u_add"; /* prefetch_add_l1 */ + prefetch_nofault = (dest == TILEGX_R_ZERO); + goto do_load_add; + case OE_IM(LD2S_ADD, X1): + memop = MO_TESW; + mnemonic = "ld2s_add"; /* prefetch_add_l2_fault */ + goto do_load_add; + case OE_IM(LD2U_ADD, X1): + memop = MO_TEUW; + mnemonic = "ld2u_add"; /* prefetch_add_l2 */ + prefetch_nofault = (dest == TILEGX_R_ZERO); + goto do_load_add; + case OE_IM(LD4S_ADD, X1): + memop = MO_TESL; + mnemonic = "ld4s_add"; /* prefetch_add_l3_fault */ + goto do_load_add; + case OE_IM(LD4U_ADD, X1): + memop = MO_TEUL; + mnemonic = "ld4u_add"; /* prefetch_add_l3 */ + prefetch_nofault = (dest == TILEGX_R_ZERO); + goto do_load_add; + case OE_IM(LDNT1S_ADD, X1): + memop = MO_SB; + mnemonic = "ldnt1s_add"; + goto do_load_add; + case OE_IM(LDNT1U_ADD, X1): + memop = MO_UB; + mnemonic = "ldnt1u_add"; + goto do_load_add; + case OE_IM(LDNT2S_ADD, X1): + memop = MO_TESW; + mnemonic = "ldnt2s_add"; + goto do_load_add; + case OE_IM(LDNT2U_ADD, X1): + memop = MO_TEUW; + mnemonic = "ldnt2u_add"; + goto do_load_add; + case OE_IM(LDNT4S_ADD, X1): + memop = MO_TESL; + mnemonic = "ldnt4s_add"; + goto do_load_add; + case OE_IM(LDNT4U_ADD, X1): + memop = MO_TEUL; + mnemonic = "ldnt4u_add"; + goto do_load_add; + case OE_IM(LDNT_ADD, X1): + memop = MO_TEQ; + mnemonic = "ldnt_add"; + goto do_load_add; + case OE_IM(LD_ADD, X1): + memop = MO_TEQ; + mnemonic = "ld_add"; + do_load_add: + if (!prefetch_nofault) { + tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop); + } + tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm); + break; + case OE_IM(LDNA_ADD, X1): + tcg_gen_andi_tl(tdest, tsrca, ~7); + tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ); + tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm); + mnemonic = "ldna_add"; + break; + case OE_IM(ORI, X0): + case OE_IM(ORI, X1): + tcg_gen_ori_tl(tdest, tsrca, imm); + mnemonic = "ori"; + break; + case OE_IM(V1ADDI, X0): + case OE_IM(V1ADDI, X1): + t0 = tcg_const_tl(V1_IMM(imm)); + gen_v12add(tdest, tsrca, t0, V1_IMM(0x80)); + tcg_temp_free(t0); + mnemonic = "v1addi"; + break; + case OE_IM(V1CMPEQI, X0): + case OE_IM(V1CMPEQI, X1): + tcg_gen_xori_tl(tdest, tsrca, V1_IMM(imm)); 
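+        /* A byte lane equals the immediate iff the xor above left it zero. */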
+        gen_v1cmpeq0(tdest);
+        mnemonic = "v1cmpeqi";
+        break;
+    case OE_IM(V1CMPLTSI, X0):
+    case OE_IM(V1CMPLTSI, X1):
+    case OE_IM(V1CMPLTUI, X0):
+    case OE_IM(V1CMPLTUI, X1):
+    case OE_IM(V1MAXUI, X0):
+    case OE_IM(V1MAXUI, X1):
+    case OE_IM(V1MINUI, X0):
+    case OE_IM(V1MINUI, X1):
+        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+    case OE_IM(V2ADDI, X0):
+    case OE_IM(V2ADDI, X1):
+        t0 = tcg_const_tl(V2_IMM(imm));
+        gen_v12add(tdest, tsrca, t0, V2_IMM(0x8000));
+        tcg_temp_free(t0);
+        mnemonic = "v2addi";
+        break;
+    case OE_IM(V2CMPEQI, X0):
+    case OE_IM(V2CMPEQI, X1):
+    case OE_IM(V2CMPLTSI, X0):
+    case OE_IM(V2CMPLTSI, X1):
+    case OE_IM(V2CMPLTUI, X0):
+    case OE_IM(V2CMPLTUI, X1):
+    case OE_IM(V2MAXSI, X0):
+    case OE_IM(V2MAXSI, X1):
+    case OE_IM(V2MINSI, X0):
+    case OE_IM(V2MINSI, X1):
+        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+    case OE_IM(XORI, X0):
+    case OE_IM(XORI, X1):
+        tcg_gen_xori_tl(tdest, tsrca, imm);
+        mnemonic = "xori";
+        break;
+
+    case OE_SH(ROTLI, X0):
+    case OE_SH(ROTLI, X1):
+    case OE_SH(ROTLI, Y0):
+    case OE_SH(ROTLI, Y1):
+        tcg_gen_rotli_tl(tdest, tsrca, imm);
+        mnemonic = "rotli";
+        break;
+    case OE_SH(SHLI, X0):
+    case OE_SH(SHLI, X1):
+    case OE_SH(SHLI, Y0):
+    case OE_SH(SHLI, Y1):
+        tcg_gen_shli_tl(tdest, tsrca, imm);
+        mnemonic = "shli";
+        break;
+    case OE_SH(SHLXI, X0):
+    case OE_SH(SHLXI, X1):
+        tcg_gen_shli_tl(tdest, tsrca, imm & 31);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        mnemonic = "shlxi";
+        break;
+    case OE_SH(SHRSI, X0):
+    case OE_SH(SHRSI, X1):
+    case OE_SH(SHRSI, Y0):
+    case OE_SH(SHRSI, Y1):
+        tcg_gen_sari_tl(tdest, tsrca, imm);
+        mnemonic = "shrsi";
+        break;
+    case OE_SH(SHRUI, X0):
+    case OE_SH(SHRUI, X1):
+    case OE_SH(SHRUI, Y0):
+    case OE_SH(SHRUI, Y1):
+        tcg_gen_shri_tl(tdest, tsrca, imm);
+        mnemonic = "shrui";
+        break;
+    case OE_SH(SHRUXI, X0):
+    case OE_SH(SHRUXI, X1):
+        if ((imm & 31) == 0) {
+            tcg_gen_ext32s_tl(tdest, tsrca);
+        } else {
+            tcg_gen_ext32u_tl(tdest, tsrca);
+            tcg_gen_shri_tl(tdest, tdest, imm & 31);
+        }
+        mnemonic = "shruxi";
+        break;
+    case OE_SH(V1SHLI, X0):
+    case OE_SH(V1SHLI, X1):
+        i2 = imm & 7;
+        i3 = 0xff >> i2;
+        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
+        tcg_gen_shli_tl(tdest, tdest, i2);
+        mnemonic = "v1shli";
+        break;
+    case OE_SH(V1SHRSI, X0):
+    case OE_SH(V1SHRSI, X1):
+        t0 = tcg_const_tl(imm & 7);
+        gen_helper_v1shrs(tdest, tsrca, t0);
+        tcg_temp_free(t0);
+        mnemonic = "v1shrsi";
+        break;
+    case OE_SH(V1SHRUI, X0):
+    case OE_SH(V1SHRUI, X1):
+        i2 = imm & 7;
+        i3 = (0xff << i2) & 0xff;
+        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
+        tcg_gen_shri_tl(tdest, tdest, i2);
+        mnemonic = "v1shrui";
+        break;
+    case OE_SH(V2SHLI, X0):
+    case OE_SH(V2SHLI, X1):
+        i2 = imm & 15;
+        i3 = 0xffff >> i2;
+        tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
+        tcg_gen_shli_tl(tdest, tdest, i2);
+        mnemonic = "v2shli";
+        break;
+    case OE_SH(V2SHRSI, X0):
+    case OE_SH(V2SHRSI, X1):
+        t0 = tcg_const_tl(imm & 15);
+        gen_helper_v2shrs(tdest, tsrca, t0);
+        tcg_temp_free(t0);
+        mnemonic = "v2shrsi";
+        break;
+    case OE_SH(V2SHRUI, X0):
+    case OE_SH(V2SHRUI, X1):
+        i2 = imm & 15;
+        i3 = (0xffff << i2) & 0xffff;
+        tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
+        tcg_gen_shri_tl(tdest, tdest, i2);
+        mnemonic = "v2shrui";
+        break;
+
+    case OE(ADDLI_OPCODE_X0, 0, X0):
+    case OE(ADDLI_OPCODE_X1, 0, X1):
+        tcg_gen_addi_tl(tdest, tsrca, imm);
+        mnemonic = "addli";
+        break;
+    case OE(ADDXLI_OPCODE_X0, 0, X0):
+    case OE(ADDXLI_OPCODE_X1, 0, X1):
+        tcg_gen_addi_tl(tdest, tsrca, imm);
+        tcg_gen_ext32s_tl(tdest, tdest);
+        mnemonic = "addxli";
+        break;
+    case
OE(SHL16INSLI_OPCODE_X0, 0, X0): + case OE(SHL16INSLI_OPCODE_X1, 0, X1): + tcg_gen_shli_tl(tdest, tsrca, 16); + tcg_gen_ori_tl(tdest, tdest, imm & 0xffff); + mnemonic = "shl16insli"; + break; + + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic, + reg_names[dest], reg_names[srca], imm); + return TILEGX_EXCP_NONE; +} + +static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext, + unsigned dest, unsigned srca, + unsigned bfs, unsigned bfe) +{ + TCGv tdest = dest_gr(dc, dest); + TCGv tsrca = load_gr(dc, srca); + TCGv tsrcd; + int len; + const char *mnemonic; + + /* The bitfield is either between E and S inclusive, + or up from S and down from E inclusive. */ + if (bfs <= bfe) { + len = bfe - bfs + 1; + } else { + len = (64 - bfs) + (bfe + 1); + } + + switch (ext) { + case BFEXTU_BF_OPCODE_X0: + if (bfs == 0 && bfe == 7) { + tcg_gen_ext8u_tl(tdest, tsrca); + } else if (bfs == 0 && bfe == 15) { + tcg_gen_ext16u_tl(tdest, tsrca); + } else if (bfs == 0 && bfe == 31) { + tcg_gen_ext32u_tl(tdest, tsrca); + } else { + int rol = 63 - bfe; + if (bfs <= bfe) { + tcg_gen_shli_tl(tdest, tsrca, rol); + } else { + tcg_gen_rotli_tl(tdest, tsrca, rol); + } + tcg_gen_shri_tl(tdest, tdest, (bfs + rol) & 63); + } + mnemonic = "bfextu"; + break; + + case BFEXTS_BF_OPCODE_X0: + if (bfs == 0 && bfe == 7) { + tcg_gen_ext8s_tl(tdest, tsrca); + } else if (bfs == 0 && bfe == 15) { + tcg_gen_ext16s_tl(tdest, tsrca); + } else if (bfs == 0 && bfe == 31) { + tcg_gen_ext32s_tl(tdest, tsrca); + } else { + int rol = 63 - bfe; + if (bfs <= bfe) { + tcg_gen_shli_tl(tdest, tsrca, rol); + } else { + tcg_gen_rotli_tl(tdest, tsrca, rol); + } + tcg_gen_sari_tl(tdest, tdest, (bfs + rol) & 63); + } + mnemonic = "bfexts"; + break; + + case BFINS_BF_OPCODE_X0: + tsrcd = load_gr(dc, dest); + if (bfs <= bfe) { + tcg_gen_deposit_tl(tdest, tsrcd, tsrca, bfs, len); + } else { + tcg_gen_rotri_tl(tdest, tsrcd, bfs); + tcg_gen_deposit_tl(tdest, tdest, tsrca, 0, len); + tcg_gen_rotli_tl(tdest, tdest, bfs); + } + mnemonic = "bfins"; + break; + + case MM_BF_OPCODE_X0: + tsrcd = load_gr(dc, dest); + if (bfs == 0) { + tcg_gen_deposit_tl(tdest, tsrca, tsrcd, 0, len); + } else { + uint64_t mask = len == 64 ? -1 : rol64((1ULL << len) - 1, bfs); + TCGv tmp = tcg_const_tl(mask); + + tcg_gen_and_tl(tdest, tsrcd, tmp); + tcg_gen_andc_tl(tmp, tsrca, tmp); + tcg_gen_or_tl(tdest, tdest, tmp); + tcg_temp_free(tmp); + } + mnemonic = "mm"; + break; + + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic, + reg_names[dest], reg_names[srca], bfs, bfe); + return TILEGX_EXCP_NONE; +} + +static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext, + unsigned srca, int off) +{ + target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES; + const char *mnemonic; + + dc->jmp.dest = tcg_const_tl(tgt); + dc->jmp.val1 = tcg_temp_new(); + tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca)); + + /* Note that the "predict taken" opcodes have bit 0 clear. + Therefore, fold the two cases together by setting bit 0. 
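+       E.g. BEQZT decodes through the BEQZ case below; the trace log
+       reconstructs the "t" suffix from ext & 1.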
*/ + switch (ext | 1) { + case BEQZ_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_EQ; + mnemonic = "beqz"; + break; + case BNEZ_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_NE; + mnemonic = "bnez"; + break; + case BGEZ_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_GE; + mnemonic = "bgez"; + break; + case BGTZ_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_GT; + mnemonic = "bgtz"; + break; + case BLEZ_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_LE; + mnemonic = "blez"; + break; + case BLTZ_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_LT; + mnemonic = "bltz"; + break; + case BLBC_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_EQ; + tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1); + mnemonic = "blbc"; + break; + case BLBS_BRANCH_OPCODE_X1: + dc->jmp.cond = TCG_COND_NE; + tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1); + mnemonic = "blbs"; + break; + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } + + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>", + mnemonic, ext & 1 ? "" : "t", + reg_names[srca], tgt, lookup_symbol(tgt)); + } + return TILEGX_EXCP_NONE; +} + +static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off) +{ + target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES; + const char *mnemonic = "j"; + + /* The extension field is 1 bit, therefore we only have JAL and J. */ + if (ext == JAL_JUMP_OPCODE_X1) { + tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR), + dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES); + mnemonic = "jal"; + } + dc->jmp.cond = TCG_COND_ALWAYS; + dc->jmp.dest = tcg_const_tl(tgt); + + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("%s " TARGET_FMT_lx " <%s>", + mnemonic, tgt, lookup_symbol(tgt)); + } + return TILEGX_EXCP_NONE; +} + +typedef struct { + const char *name; + intptr_t offset; + void (*get)(TCGv, TCGv_ptr); + void (*put)(TCGv_ptr, TCGv); +} TileSPR; + +static const TileSPR *find_spr(unsigned spr) +{ + /* Allow the compiler to construct the binary search tree. 
*/
+#define D(N, O, G, P) \
+ case SPR_##N: { static const TileSPR x = { #N, O, G, P }; return &x; }
+
+ switch (spr) {
+ D(CMPEXCH_VALUE,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_CMPEXCH]), 0, 0)
+ D(INTERRUPT_CRITICAL_SECTION,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_CRITICAL_SEC]), 0, 0)
+ D(SIM_CONTROL,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_SIM_CONTROL]), 0, 0)
+ D(EX_CONTEXT_0_0,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0]), 0, 0)
+ D(EX_CONTEXT_0_1,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_1]), 0, 0)
+ }
+
+#undef D
+
+ qemu_log_mask(LOG_UNIMP, "UNIMP SPR %u\n", spr);
+ return NULL;
+}
+
+static TileExcp gen_mtspr_x1(DisasContext *dc, unsigned spr, unsigned srca)
+{
+ const TileSPR *def = find_spr(spr);
+ TCGv tsrca;
+
+ if (def == NULL) {
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr spr[%u], %s", spr, reg_names[srca]);
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ }
+
+ tsrca = load_gr(dc, srca);
+ if (def->put) {
+ def->put(cpu_env, tsrca);
+ } else {
+ tcg_gen_st_tl(tsrca, cpu_env, def->offset);
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr %s, %s", def->name, reg_names[srca]);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp gen_mfspr_x1(DisasContext *dc, unsigned dest, unsigned spr)
+{
+ const TileSPR *def = find_spr(spr);
+ TCGv tdest;
+
+ if (def == NULL) {
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, spr[%u]", reg_names[dest], spr);
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ }
+
+ tdest = dest_gr(dc, dest);
+ if (def->get) {
+ def->get(tdest, cpu_env);
+ } else {
+ tcg_gen_ld_tl(tdest, cpu_env, def->offset);
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, %s", reg_names[dest], def->name);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
+{
+ unsigned opc = get_Opcode_Y0(bundle);
+ unsigned ext = get_RRROpcodeExtension_Y0(bundle);
+ unsigned dest = get_Dest_Y0(bundle);
+ unsigned srca = get_SrcA_Y0(bundle);
+ unsigned srcb;
+ int imm;
+
+ switch (opc) {
+ case RRR_1_OPCODE_Y0:
+ if (ext == UNARY_RRR_1_OPCODE_Y0) {
+ ext = get_UnaryOpcodeExtension_Y0(bundle);
+ return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca, bundle);
+ }
+ /* fallthru */
+ case RRR_0_OPCODE_Y0:
+ case RRR_2_OPCODE_Y0:
+ case RRR_3_OPCODE_Y0:
+ case RRR_4_OPCODE_Y0:
+ case RRR_5_OPCODE_Y0:
+ case RRR_6_OPCODE_Y0:
+ case RRR_7_OPCODE_Y0:
+ case RRR_8_OPCODE_Y0:
+ case RRR_9_OPCODE_Y0:
+ srcb = get_SrcB_Y0(bundle);
+ return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);
+
+ case SHIFT_OPCODE_Y0:
+ ext = get_ShiftOpcodeExtension_Y0(bundle);
+ imm = get_ShAmt_Y0(bundle);
+ return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);
+
+ case ADDI_OPCODE_Y0:
+ case ADDXI_OPCODE_Y0:
+ case ANDI_OPCODE_Y0:
+ case CMPEQI_OPCODE_Y0:
+ case CMPLTSI_OPCODE_Y0:
+ imm = (int8_t)get_Imm8_Y0(bundle);
+ return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+}
+
+static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
+{
+ unsigned opc = get_Opcode_Y1(bundle);
+ unsigned ext = get_RRROpcodeExtension_Y1(bundle);
+ unsigned dest = get_Dest_Y1(bundle);
+ unsigned srca = get_SrcA_Y1(bundle);
+ unsigned srcb;
+ int imm;
+
+ switch (get_Opcode_Y1(bundle)) {
+ case RRR_1_OPCODE_Y1:
+ if (ext == UNARY_RRR_1_OPCODE_Y1) {
+ ext = get_UnaryOpcodeExtension_Y1(bundle);
+ return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca, bundle);
+ }
+ /* fallthru */
+ case RRR_0_OPCODE_Y1:
+ case RRR_2_OPCODE_Y1:
+ case RRR_3_OPCODE_Y1:
+ case RRR_4_OPCODE_Y1:
+ case 
RRR_5_OPCODE_Y1: + case RRR_6_OPCODE_Y1: + case RRR_7_OPCODE_Y1: + srcb = get_SrcB_Y1(bundle); + return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb); + + case SHIFT_OPCODE_Y1: + ext = get_ShiftOpcodeExtension_Y1(bundle); + imm = get_ShAmt_Y1(bundle); + return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm); + + case ADDI_OPCODE_Y1: + case ADDXI_OPCODE_Y1: + case ANDI_OPCODE_Y1: + case CMPEQI_OPCODE_Y1: + case CMPLTSI_OPCODE_Y1: + imm = (int8_t)get_Imm8_Y1(bundle); + return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm); + + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } +} + +static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle) +{ + unsigned mode = get_Mode(bundle); + unsigned opc = get_Opcode_Y2(bundle); + unsigned srca = get_SrcA_Y2(bundle); + unsigned srcbdest = get_SrcBDest_Y2(bundle); + const char *mnemonic; + TCGMemOp memop; + bool prefetch_nofault = false; + + switch (OEY2(opc, mode)) { + case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2): + memop = MO_SB; + mnemonic = "ld1s"; /* prefetch_l1_fault */ + goto do_load; + case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2): + memop = MO_UB; + mnemonic = "ld1u"; /* prefetch, prefetch_l1 */ + prefetch_nofault = (srcbdest == TILEGX_R_ZERO); + goto do_load; + case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2): + memop = MO_TESW; + mnemonic = "ld2s"; /* prefetch_l2_fault */ + goto do_load; + case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2): + memop = MO_TEUW; + mnemonic = "ld2u"; /* prefetch_l2 */ + prefetch_nofault = (srcbdest == TILEGX_R_ZERO); + goto do_load; + case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2): + memop = MO_TESL; + mnemonic = "ld4s"; /* prefetch_l3_fault */ + goto do_load; + case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2): + memop = MO_TEUL; + mnemonic = "ld4u"; /* prefetch_l3 */ + prefetch_nofault = (srcbdest == TILEGX_R_ZERO); + goto do_load; + case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2): + memop = MO_TEQ; + mnemonic = "ld"; + do_load: + if (!prefetch_nofault) { + tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca), + dc->mmuidx, memop); + } + qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic, + reg_names[srcbdest], reg_names[srca]); + return TILEGX_EXCP_NONE; + + case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2): + return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1"); + case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2): + return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2"); + case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2): + return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4"); + case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2): + return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st"); + + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } +} + +static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle) +{ + unsigned opc = get_Opcode_X0(bundle); + unsigned dest = get_Dest_X0(bundle); + unsigned srca = get_SrcA_X0(bundle); + unsigned ext, srcb, bfs, bfe; + int imm; + + switch (opc) { + case RRR_0_OPCODE_X0: + ext = get_RRROpcodeExtension_X0(bundle); + if (ext == UNARY_RRR_0_OPCODE_X0) { + ext = get_UnaryOpcodeExtension_X0(bundle); + return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca, bundle); + } + srcb = get_SrcB_X0(bundle); + return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb); + + case SHIFT_OPCODE_X0: + ext = get_ShiftOpcodeExtension_X0(bundle); + imm = get_ShAmt_X0(bundle); + return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm); + + case IMM8_OPCODE_X0: + ext = get_Imm8OpcodeExtension_X0(bundle); + imm = (int8_t)get_Imm8_X0(bundle); + return gen_rri_opcode(dc, OE(opc, ext, X0), 
dest, srca, imm); + + case BF_OPCODE_X0: + ext = get_BFOpcodeExtension_X0(bundle); + bfs = get_BFStart_X0(bundle); + bfe = get_BFEnd_X0(bundle); + return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe); + + case ADDLI_OPCODE_X0: + case SHL16INSLI_OPCODE_X0: + case ADDXLI_OPCODE_X0: + imm = (int16_t)get_Imm16_X0(bundle); + return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm); + + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } +} + +static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle) +{ + unsigned opc = get_Opcode_X1(bundle); + unsigned dest = get_Dest_X1(bundle); + unsigned srca = get_SrcA_X1(bundle); + unsigned ext, srcb; + int imm; + + switch (opc) { + case RRR_0_OPCODE_X1: + ext = get_RRROpcodeExtension_X1(bundle); + srcb = get_SrcB_X1(bundle); + switch (ext) { + case UNARY_RRR_0_OPCODE_X1: + ext = get_UnaryOpcodeExtension_X1(bundle); + return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca, bundle); + case ST1_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1"); + case ST2_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2"); + case ST4_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4"); + case STNT1_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1"); + case STNT2_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2"); + case STNT4_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4"); + case STNT_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt"); + case ST_RRR_0_OPCODE_X1: + return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st"); + } + return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb); + + case SHIFT_OPCODE_X1: + ext = get_ShiftOpcodeExtension_X1(bundle); + imm = get_ShAmt_X1(bundle); + return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm); + + case IMM8_OPCODE_X1: + ext = get_Imm8OpcodeExtension_X1(bundle); + imm = (int8_t)get_Dest_Imm8_X1(bundle); + srcb = get_SrcB_X1(bundle); + switch (ext) { + case ST1_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add"); + case ST2_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add"); + case ST4_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add"); + case STNT1_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add"); + case STNT2_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add"); + case STNT4_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add"); + case STNT_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add"); + case ST_ADD_IMM8_OPCODE_X1: + return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add"); + case MFSPR_IMM8_OPCODE_X1: + return gen_mfspr_x1(dc, dest, get_MF_Imm14_X1(bundle)); + case MTSPR_IMM8_OPCODE_X1: + return gen_mtspr_x1(dc, get_MT_Imm14_X1(bundle), srca); + } + imm = (int8_t)get_Imm8_X1(bundle); + return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm); + + case BRANCH_OPCODE_X1: + ext = get_BrType_X1(bundle); + imm = sextract32(get_BrOff_X1(bundle), 0, 17); + return gen_branch_opcode_x1(dc, ext, srca, imm); + + case JUMP_OPCODE_X1: + ext = get_JumpOpcodeExtension_X1(bundle); + imm = sextract32(get_JumpOff_X1(bundle), 0, 27); + return gen_jump_opcode_x1(dc, ext, imm); + + case ADDLI_OPCODE_X1: + case SHL16INSLI_OPCODE_X1: + case ADDXLI_OPCODE_X1: + imm = 
(int16_t)get_Imm16_X1(bundle); + return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm); + + default: + return TILEGX_EXCP_OPCODE_UNKNOWN; + } +} + +static void notice_excp(DisasContext *dc, uint64_t bundle, + const char *type, TileExcp excp) +{ + if (likely(excp == TILEGX_EXCP_NONE)) { + return; + } + gen_exception(dc, excp); + switch (excp) { + case TILEGX_EXCP_OPCODE_UNIMPLEMENTED: + qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle); + break; + case TILEGX_EXCP_OPCODE_UNKNOWN: + qemu_log_mask(LOG_UNIMP, "UNKNOWN %s, [" FMT64X "]\n", type, bundle); + break; + default: + break; + } +} + +static void translate_one_bundle(DisasContext *dc, uint64_t bundle) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dc->wb); i++) { + DisasContextTemp *wb = &dc->wb[i]; + wb->reg = TILEGX_R_NOREG; + TCGV_UNUSED_I64(wb->val); + } + dc->num_wb = 0; + + qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc); + if (get_Mode(bundle)) { + notice_excp(dc, bundle, "y0", decode_y0(dc, bundle)); + qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; "); + notice_excp(dc, bundle, "y1", decode_y1(dc, bundle)); + qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; "); + notice_excp(dc, bundle, "y2", decode_y2(dc, bundle)); + } else { + notice_excp(dc, bundle, "x0", decode_x0(dc, bundle)); + qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; "); + notice_excp(dc, bundle, "x1", decode_x1(dc, bundle)); + } + qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n"); + + for (i = dc->num_wb - 1; i >= 0; --i) { + DisasContextTemp *wb = &dc->wb[i]; + if (wb->reg < TILEGX_R_COUNT) { + tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val); + } + tcg_temp_free_i64(wb->val); + } + + if (dc->jmp.cond != TCG_COND_NEVER) { + if (dc->jmp.cond == TCG_COND_ALWAYS) { + tcg_gen_mov_i64(cpu_pc, dc->jmp.dest); + } else { + TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES); + tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc, + dc->jmp.val1, load_zero(dc), + dc->jmp.dest, next); + tcg_temp_free_i64(dc->jmp.val1); + tcg_temp_free_i64(next); + } + tcg_temp_free_i64(dc->jmp.dest); + tcg_gen_exit_tb(0); + dc->exit_tb = true; + } else if (dc->atomic_excp != TILEGX_EXCP_NONE) { + gen_exception(dc, dc->atomic_excp); + } +} + +void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb) +{ + TileGXCPU *cpu = tilegx_env_get_cpu(env); + DisasContext ctx; + DisasContext *dc = &ctx; + CPUState *cs = CPU(cpu); + uint64_t pc_start = tb->pc; + uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + int num_insns = 0; + int max_insns = tb->cflags & CF_COUNT_MASK; + + dc->pc = pc_start; + dc->mmuidx = 0; + dc->exit_tb = false; + dc->atomic_excp = TILEGX_EXCP_NONE; + dc->jmp.cond = TCG_COND_NEVER; + TCGV_UNUSED_I64(dc->jmp.dest); + TCGV_UNUSED_I64(dc->jmp.val1); + TCGV_UNUSED_I64(dc->zero); + + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("IN: %s\n", lookup_symbol(pc_start)); + } + if (!max_insns) { + max_insns = CF_COUNT_MASK; + } + if (cs->singlestep_enabled || singlestep) { + max_insns = 1; + } + if (max_insns > TCG_MAX_INSNS) { + max_insns = TCG_MAX_INSNS; + } + gen_tb_start(tb); + + while (1) { + tcg_gen_insn_start(dc->pc); + num_insns++; + + translate_one_bundle(dc, cpu_ldq_data(env, dc->pc)); + + if (dc->exit_tb) { + /* PC updated and EXIT_TB/GOTO_TB/exception emitted. */ + break; + } + dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES; + if (num_insns >= max_insns + || dc->pc >= next_page_start + || tcg_op_buf_full()) { + /* Ending the TB due to TB size or page boundary. Set PC. 
*/
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ tcg_gen_exit_tb(0);
+ break;
+ }
+ }
+
+ gen_tb_end(tb, num_insns);
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
+}
+
+void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
+
+void tilegx_tcg_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc");
+ for (i = 0; i < TILEGX_R_COUNT; i++) {
+ cpu_regs[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUTLGState, regs[i]),
+ reg_names[i]);
+ }
+}
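
The sub-word vector shift cases above (v1shli, v1shrui, v2shli, v2shrui) need no per-lane loop: they mask off the bits that would cross a lane boundary and then issue a single 64-bit shift. A minimal standalone C sketch of the same trick, assuming V1_IMM replicates a byte across all eight lanes as its uses in the patch suggest:

/* Standalone sketch (not part of the patch) of the masked-shift trick
 * used for v1shli: replicate the per-byte mask across all eight lanes,
 * clear the bits that would cross a lane boundary, then do one shift. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to match the patch's V1_IMM: replicate a byte into all lanes. */
#define V1_IMM(X) (((X) & 0xff) * 0x0101010101010101ULL)

static uint64_t v1shli(uint64_t src, unsigned imm)
{
    unsigned i2 = imm & 7;            /* per-lane shift count */
    unsigned i3 = 0xff >> i2;         /* bits of a lane that survive */
    return (src & V1_IMM(i3)) << i2;  /* one 64-bit shift, no carry-over */
}

int main(void)
{
    /* 0x81 per byte, shifted left by 1: the 0x80 bit of each lane is
       discarded instead of leaking into the next lane, giving 0x02 per
       byte. */
    printf("%016" PRIx64 "\n", v1shli(0x8181818181818181ULL, 1));
    return 0;
}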
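
gen_bf_opcode_x0 reduces every BFEXTU/BFEXTS to one rotate plus one shift: rotate left by 63 - bfe so the field's top bit lands in bit 63, then shift right to drop everything below bfs. The same arithmetic as a standalone C sketch (rol64_demo is a local stand-in for the kernel-style rol64 helper the patch relies on):

/* Standalone sketch (not part of the patch) of the BFEXTU strategy. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rol64_demo(uint64_t v, unsigned n)
{
    n &= 63;
    return n ? (v << n) | (v >> (64 - n)) : v;
}

static uint64_t bfextu(uint64_t src, unsigned bfs, unsigned bfe)
{
    unsigned rol = 63 - bfe;
    uint64_t t = (bfs <= bfe) ? src << rol            /* contiguous field */
                              : rol64_demo(src, rol); /* field wraps bit 63 */
    return t >> ((bfs + rol) & 63);
}

int main(void)
{
    /* Extract bits [15:8] of 0xABCD: prints ab. */
    printf("%" PRIx64 "\n", bfextu(0xABCD, 8, 15));
    /* Wrap-around field from bit 60 up through bit 3: prints af. */
    printf("%" PRIx64 "\n", bfextu(0xF00000000000000AULL, 60, 3));
    return 0;
}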
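
The MM (masked merge) case builds a rotated mask of len one-bits and combines the two operands with and/andc/or. The same dataflow as a standalone C sketch:

/* Standalone sketch (not part of the patch) of the MM masked merge:
 * keep the masked bits of the old destination, fill the rest from the
 * source. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rol64_demo(uint64_t v, unsigned n)
{
    n &= 63;
    return n ? (v << n) | (v >> (64 - n)) : v;
}

static uint64_t mm(uint64_t srca, uint64_t srcd, unsigned bfs, unsigned len)
{
    uint64_t mask = (len == 64) ? ~0ULL
                                : rol64_demo((1ULL << len) - 1, bfs);
    return (srcd & mask) | (srca & ~mask);  /* and + andc + or, as above */
}

int main(void)
{
    /* Merge bits [23:8] of srcd into srca: prints 1111111111ffff11. */
    printf("%016" PRIx64 "\n",
           mm(0x1111111111111111ULL, 0xffffffffffffffffULL, 8, 16));
    return 0;
}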
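
SHL16INSLI is translated above as a shift-by-16 followed by an OR of the low 16 immediate bits; chained after a moveli, a few such links can materialize any 64-bit constant. A standalone C sketch:

/* Standalone sketch (not part of the patch) of constant materialization
 * via shl16insli: each link shifts the accumulator left 16 bits and ORs
 * in the next 16-bit chunk. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t shl16insli(uint64_t acc, uint16_t imm)
{
    return (acc << 16) | imm;  /* matches the shli + ori translation */
}

int main(void)
{
    /* moveli r0, 0x1234 ; shl16insli r0, r0, 0x5678 ; ... */
    uint64_t r0 = (uint64_t)(int64_t)(int16_t)0x1234; /* moveli sign-extends */
    r0 = shl16insli(r0, 0x5678);
    r0 = shl16insli(r0, 0x9abc);
    r0 = shl16insli(r0, 0xdef0);
    printf("%016" PRIx64 "\n", r0);  /* prints 123456789abcdef0 */
    return 0;
}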
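
find_spr avoids a global SPR table: the D macro expands each case to a function-local static descriptor, so the compiler lowers the lookup to an ordinary switch (typically a jump table or binary search) and the function returns a pointer to the matching descriptor. The pattern in isolation, with illustrative (hypothetical) names and keys:

/* Standalone sketch (not part of the patch) of the find_spr pattern. */
#include <stdio.h>

typedef struct { const char *name; int key; } Desc;

static const Desc *find_desc(unsigned key)
{
#define D(N, K) \
    case K: { static const Desc x = { #N, K }; return &x; }

    switch (key) {
    D(ALPHA, 10)
    D(BETA, 20)
    D(GAMMA, 30)
    }
#undef D
    return NULL;  /* unknown key, like the UNIMP SPR path above */
}

int main(void)
{
    const Desc *d = find_desc(20);
    printf("%s\n", d ? d->name : "unknown");  /* prints BETA */
    return 0;
}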
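
translate_one_bundle stages every destination write in dc->wb and commits the buffer only after all pipes of the bundle have been translated, because each pipe must read the register file as it was before the bundle. A standalone C sketch of why the deferral matters (register and buffer sizes here are illustrative, not QEMU's):

/* Standalone sketch (not part of the patch) of bundle write-back
 * staging: record destination writes, commit them after the bundle. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { NREGS = 56, MAX_WB = 4 };  /* illustrative sizes */

typedef struct { int reg; uint64_t val; } WriteBack;

static uint64_t regs[NREGS];
static WriteBack wb[MAX_WB];
static int num_wb;

static void stage_write(int reg, uint64_t val)
{
    wb[num_wb].reg = reg;  /* record only; regs[] stays untouched */
    wb[num_wb].val = val;
    num_wb++;
}

static void commit_bundle(void)
{
    for (int i = 0; i < num_wb; i++) {
        regs[wb[i].reg] = wb[i].val;
    }
    num_wb = 0;
}

int main(void)
{
    regs[0] = 1;
    regs[1] = 2;
    /* One bundle whose two pipes swap r0 and r1: both pipes read the
       pre-bundle values, which only works because commit is deferred. */
    stage_write(0, regs[1]);
    stage_write(1, regs[0]);
    commit_bundle();
    printf("r0=%" PRIu64 " r1=%" PRIu64 "\n", regs[0], regs[1]); /* r0=2 r1=1 */
    return 0;
}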