author     Yang Zhang <yang.z.zhang@intel.com>   2015-08-28 09:58:54 +0800
committer  Yang Zhang <yang.z.zhang@intel.com>   2015-09-01 12:44:00 +0800
commit     e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb (patch)
tree       66b09f592c55df2878107a468a91d21506104d3f /qemu/target-alpha
parent     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (diff)
Add qemu 2.4.0
Change-Id: Ic99cbad4b61f8b127b7dc74d04576c0bcbaaf4f5
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
Diffstat (limited to 'qemu/target-alpha')
-rw-r--r--  qemu/target-alpha/Makefile.objs  |    4
-rw-r--r--  qemu/target-alpha/STATUS         |   28
-rw-r--r--  qemu/target-alpha/cpu-qom.h      |   91
-rw-r--r--  qemu/target-alpha/cpu.c          |  325
-rw-r--r--  qemu/target-alpha/cpu.h          |  493
-rw-r--r--  qemu/target-alpha/fpu_helper.c   |  551
-rw-r--r--  qemu/target-alpha/gdbstub.c      |   93
-rw-r--r--  qemu/target-alpha/helper.c       |  498
-rw-r--r--  qemu/target-alpha/helper.h       |  115
-rw-r--r--  qemu/target-alpha/int_helper.c   |  251
-rw-r--r--  qemu/target-alpha/machine.c      |   89
-rw-r--r--  qemu/target-alpha/mem_helper.c   |  159
-rw-r--r--  qemu/target-alpha/sys_helper.c   |  111
-rw-r--r--  qemu/target-alpha/translate.c    | 2961
-rw-r--r--  qemu/target-alpha/vax_helper.c   |  353
15 files changed, 6122 insertions, 0 deletions
diff --git a/qemu/target-alpha/Makefile.objs b/qemu/target-alpha/Makefile.objs
new file mode 100644
index 000000000..63664629f
--- /dev/null
+++ b/qemu/target-alpha/Makefile.objs
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SOFTMMU) += machine.o
+obj-y += translate.o helper.o cpu.o
+obj-y += int_helper.o fpu_helper.o vax_helper.o sys_helper.o mem_helper.o
+obj-y += gdbstub.o
diff --git a/qemu/target-alpha/STATUS b/qemu/target-alpha/STATUS
new file mode 100644
index 000000000..6c9744569
--- /dev/null
+++ b/qemu/target-alpha/STATUS
@@ -0,0 +1,28 @@
+(to be completed)
+
+Alpha emulation structure:
+cpu.h : CPU definitions globally exported
+exec.h : CPU definitions used only for translated code execution
+helper.c : helpers that can be called either by the translated code
+           or the QEMU core, including the exception handler.
+op_helper.c : helpers that can be called only from TCG
+helper.h : TCG helper prototypes
+translate.c : Alpha instructions to micro-operations translator
+
+Code translator status:
+The Alpha CPU instruction emulation should be quite complete, with the
+limitation that the VAX floating-point loads and stores are not tested.
+The 4 MMU modes are implemented.
+
+Linux user mode emulation status:
+A few programs start to run.  Most crash at some point, dereferencing a
+NULL pointer.  It seems that the UNIQUE register is not initialized properly.
+Old executables that do not rely on TLS support may run, but this remains
+to be verified.
+
+Full system emulation status:
+* Alpha PALcode emulation is in a very early stage and is not sufficient
+  to run any real OS.  The alpha-softmmu target is not enabled for now.
+* no hardware platform description is implemented
+* there might be problems in the Alpha PALcode dedicated instructions
+  that would prevent using a native PALcode image.
diff --git a/qemu/target-alpha/cpu-qom.h b/qemu/target-alpha/cpu-qom.h
new file mode 100644
index 000000000..b01c6c82e
--- /dev/null
+++ b/qemu/target-alpha/cpu-qom.h
@@ -0,0 +1,91 @@
+/*
+ * QEMU Alpha CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_ALPHA_CPU_QOM_H
+#define QEMU_ALPHA_CPU_QOM_H
+
+#include "qom/cpu.h"
+#include "cpu.h"
+
+#define TYPE_ALPHA_CPU "alpha-cpu"
+
+#define ALPHA_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(AlphaCPUClass, (klass), TYPE_ALPHA_CPU)
+#define ALPHA_CPU(obj) \
+    OBJECT_CHECK(AlphaCPU, (obj), TYPE_ALPHA_CPU)
+#define ALPHA_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(AlphaCPUClass, (obj), TYPE_ALPHA_CPU)
+
+/**
+ * AlphaCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An Alpha CPU model.
+ */
+typedef struct AlphaCPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+
+    DeviceRealize parent_realize;
+    void (*parent_reset)(CPUState *cpu);
+} AlphaCPUClass;
+
+/**
+ * AlphaCPU:
+ * @env: #CPUAlphaState
+ *
+ * An Alpha CPU.
+ */
+typedef struct AlphaCPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPUAlphaState env;
+
+    /* This alarm doesn't exist in real hardware; we wish it did.  */
+    QEMUTimer *alarm_timer;
+} AlphaCPU;
+
+static inline AlphaCPU *alpha_env_get_cpu(CPUAlphaState *env)
+{
+    return container_of(env, AlphaCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(alpha_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(AlphaCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_alpha_cpu;
+#endif
+
+void alpha_cpu_do_interrupt(CPUState *cpu);
+bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+                          int flags);
+hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int alpha_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+                                   int is_write, int is_user, uintptr_t retaddr);
+
+#endif
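[Note: alpha_env_get_cpu() above uses the standard container_of() pattern: given a
pointer to the embedded CPUAlphaState member, subtracting the member's offset
recovers the enclosing AlphaCPU.  A minimal standalone sketch of the same idiom,
with toy struct names that are not part of this patch:]

#include <stddef.h>
#include <assert.h>

/* Toy reproduction of the container_of() pattern used by alpha_env_get_cpu. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct Env { int pc; };
struct Cpu { int parent; struct Env env; };

int main(void)
{
    struct Cpu cpu;
    struct Env *env = &cpu.env;
    /* Subtracting offsetof(struct Cpu, env) from the member pointer
       yields the enclosing object. */
    assert(container_of(env, struct Cpu, env) == &cpu);
    return 0;
}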
diff --git a/qemu/target-alpha/cpu.c b/qemu/target-alpha/cpu.c
new file mode 100644
index 000000000..421d7e536
--- /dev/null
+++ b/qemu/target-alpha/cpu.c
@@ -0,0 +1,325 @@
+/*
+ * QEMU Alpha CPU
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+
+
+static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+
+    cpu->env.pc = value;
+}
+
+static bool alpha_cpu_has_work(CPUState *cs)
+{
+    /* Here we are checking to see if the CPU should wake up from HALT.
+       We will have gotten into this state only for WTINT from PALmode.  */
+    /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
+       asleep even if (some) interrupts have been asserted.  For now,
+       assume that if a CPU really wants to stay asleep, it will mask
+       interrupts at the chipset level, which will prevent these bits
+       from being set in the first place.  */
+    return cs->interrupt_request & (CPU_INTERRUPT_HARD
+                                    | CPU_INTERRUPT_TIMER
+                                    | CPU_INTERRUPT_SMP
+                                    | CPU_INTERRUPT_MCHK);
+}
+
+static void alpha_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+    CPUState *cs = CPU(dev);
+    AlphaCPUClass *acc = ALPHA_CPU_GET_CLASS(dev);
+
+    qemu_init_vcpu(cs);
+
+    acc->parent_realize(dev, errp);
+}
+
+/* Sort alphabetically by type name.  */
+static gint alpha_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+    ObjectClass *class_a = (ObjectClass *)a;
+    ObjectClass *class_b = (ObjectClass *)b;
+    const char *name_a, *name_b;
+
+    name_a = object_class_get_name(class_a);
+    name_b = object_class_get_name(class_b);
+    return strcmp(name_a, name_b);
+}
+
+static void alpha_cpu_list_entry(gpointer data, gpointer user_data)
+{
+    ObjectClass *oc = data;
+    CPUListState *s = user_data;
+
+    (*s->cpu_fprintf)(s->file, "  %s\n",
+                      object_class_get_name(oc));
+}
+
+void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+    CPUListState s = {
+        .file = f,
+        .cpu_fprintf = cpu_fprintf,
+    };
+    GSList *list;
+
+    list = object_class_get_list(TYPE_ALPHA_CPU, false);
+    list = g_slist_sort(list, alpha_cpu_list_compare);
+    (*cpu_fprintf)(f, "Available CPUs:\n");
+    g_slist_foreach(list, alpha_cpu_list_entry, &s);
+    g_slist_free(list);
+}
+
+/* Models */
+
+#define TYPE(model) model "-" TYPE_ALPHA_CPU
+
+typedef struct AlphaCPUAlias {
+    const char *alias;
+    const char *typename;
+} AlphaCPUAlias;
+
+static const AlphaCPUAlias alpha_cpu_aliases[] = {
+    { "21064",   TYPE("ev4") },
+    { "21164",   TYPE("ev5") },
+    { "21164a",  TYPE("ev56") },
+    { "21164pc", TYPE("pca56") },
+    { "21264",   TYPE("ev6") },
+    { "21264a",  TYPE("ev67") },
+};
+
+static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model)
+{
+    ObjectClass *oc = NULL;
+    char *typename;
+    int i;
+
+    if (cpu_model == NULL) {
+        return NULL;
+    }
+
+    oc = object_class_by_name(cpu_model);
+    if (oc != NULL && object_class_dynamic_cast(oc, TYPE_ALPHA_CPU) != NULL &&
+        !object_class_is_abstract(oc)) {
+        return oc;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(alpha_cpu_aliases); i++) {
+        if (strcmp(cpu_model, alpha_cpu_aliases[i].alias) == 0) {
+            oc = object_class_by_name(alpha_cpu_aliases[i].typename);
+            assert(oc != NULL && !object_class_is_abstract(oc));
+            return oc;
+        }
+    }
+
+    typename = g_strdup_printf("%s-" TYPE_ALPHA_CPU, cpu_model);
+    oc = object_class_by_name(typename);
+    g_free(typename);
+    if (oc != NULL && object_class_is_abstract(oc)) {
+        oc = NULL;
+    }
+    return oc;
+}
+
+AlphaCPU *cpu_alpha_init(const char *cpu_model)
+{
+    AlphaCPU *cpu;
+    ObjectClass *cpu_class;
+
+    cpu_class = alpha_cpu_class_by_name(cpu_model);
+    if (cpu_class == NULL) {
+        /* Default to ev67; no reason not to emulate insns by default.  */
+        cpu_class = object_class_by_name(TYPE("ev67"));
+    }
+    cpu = ALPHA_CPU(object_new(object_class_get_name(cpu_class)));
+
+    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+    return cpu;
+}
+
+static void ev4_cpu_initfn(Object *obj)
+{
+    AlphaCPU *cpu = ALPHA_CPU(obj);
+    CPUAlphaState *env = &cpu->env;
+
+    env->implver = IMPLVER_2106x;
+}
+
+static const TypeInfo ev4_cpu_type_info = {
+    .name = TYPE("ev4"),
+    .parent = TYPE_ALPHA_CPU,
+    .instance_init = ev4_cpu_initfn,
+};
+
+static void ev5_cpu_initfn(Object *obj)
+{
+    AlphaCPU *cpu = ALPHA_CPU(obj);
+    CPUAlphaState *env = &cpu->env;
+
+    env->implver = IMPLVER_21164;
+}
+
+static const TypeInfo ev5_cpu_type_info = {
+    .name = TYPE("ev5"),
+    .parent = TYPE_ALPHA_CPU,
+    .instance_init = ev5_cpu_initfn,
+};
+
+static void ev56_cpu_initfn(Object *obj)
+{
+    AlphaCPU *cpu = ALPHA_CPU(obj);
+    CPUAlphaState *env = &cpu->env;
+
+    env->amask |= AMASK_BWX;
+}
+
+static const TypeInfo ev56_cpu_type_info = {
+    .name = TYPE("ev56"),
+    .parent = TYPE("ev5"),
+    .instance_init = ev56_cpu_initfn,
+};
+
+static void pca56_cpu_initfn(Object *obj)
+{
+    AlphaCPU *cpu = ALPHA_CPU(obj);
+    CPUAlphaState *env = &cpu->env;
+
+    env->amask |= AMASK_MVI;
+}
+
+static const TypeInfo pca56_cpu_type_info = {
+    .name = TYPE("pca56"),
+    .parent = TYPE("ev56"),
+    .instance_init = pca56_cpu_initfn,
+};
+
+static void ev6_cpu_initfn(Object *obj)
+{
+    AlphaCPU *cpu = ALPHA_CPU(obj);
+    CPUAlphaState *env = &cpu->env;
+
+    env->implver = IMPLVER_21264;
+    env->amask = AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP;
+}
+
+static const TypeInfo ev6_cpu_type_info = {
+    .name = TYPE("ev6"),
+    .parent = TYPE_ALPHA_CPU,
+    .instance_init = ev6_cpu_initfn,
+};
+
+static void ev67_cpu_initfn(Object *obj)
+{
+    AlphaCPU *cpu = ALPHA_CPU(obj);
+    CPUAlphaState *env = &cpu->env;
+
+    env->amask |= AMASK_CIX | AMASK_PREFETCH;
+}
+
+static const TypeInfo ev67_cpu_type_info = {
+    .name = TYPE("ev67"),
+    .parent = TYPE("ev6"),
+    .instance_init = ev67_cpu_initfn,
+};
+
+static const TypeInfo ev68_cpu_type_info = {
+    .name = TYPE("ev68"),
+    .parent = TYPE("ev67"),
+};
+
+static void alpha_cpu_initfn(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    AlphaCPU *cpu = ALPHA_CPU(obj);
+    CPUAlphaState *env = &cpu->env;
+
+    cs->env_ptr = env;
+    cpu_exec_init(cs, &error_abort);
+    tlb_flush(cs, 1);
+
+    alpha_translate_init();
+
+#if defined(CONFIG_USER_ONLY)
+    env->ps = PS_USER_MODE;
+    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
+                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD
+                               | FPCR_DYN_NORMAL));
+#endif
+    env->lock_addr = -1;
+    env->fen = 1;
+}
+
+static void alpha_cpu_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+    CPUClass *cc = CPU_CLASS(oc);
+    AlphaCPUClass *acc = ALPHA_CPU_CLASS(oc);
+
+    acc->parent_realize = dc->realize;
+    dc->realize = alpha_cpu_realizefn;
+
+    cc->class_by_name = alpha_cpu_class_by_name;
+    cc->has_work = alpha_cpu_has_work;
+    cc->do_interrupt = alpha_cpu_do_interrupt;
+    cc->cpu_exec_interrupt = alpha_cpu_exec_interrupt;
+    cc->dump_state = alpha_cpu_dump_state;
+    cc->set_pc = alpha_cpu_set_pc;
+    cc->gdb_read_register = alpha_cpu_gdb_read_register;
+    cc->gdb_write_register = alpha_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
+#else
+    cc->do_unassigned_access = alpha_cpu_unassigned_access;
+    cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
+    cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
+    dc->vmsd = &vmstate_alpha_cpu;
+#endif
+    cc->gdb_num_core_regs = 67;
+}
+
+static const TypeInfo alpha_cpu_type_info = {
+    .name = TYPE_ALPHA_CPU,
+    .parent = TYPE_CPU,
+    .instance_size = sizeof(AlphaCPU),
+    .instance_init = alpha_cpu_initfn,
+    .abstract = true,
+    .class_size = sizeof(AlphaCPUClass),
+    .class_init = alpha_cpu_class_init,
+};
+
+static void alpha_cpu_register_types(void)
+{
+    type_register_static(&alpha_cpu_type_info);
+    type_register_static(&ev4_cpu_type_info);
+    type_register_static(&ev5_cpu_type_info);
+    type_register_static(&ev56_cpu_type_info);
+    type_register_static(&pca56_cpu_type_info);
+    type_register_static(&ev6_cpu_type_info);
+    type_register_static(&ev67_cpu_type_info);
+    type_register_static(&ev68_cpu_type_info);
+}
+
+type_init(alpha_cpu_register_types)
diff --git a/qemu/target-alpha/cpu.h b/qemu/target-alpha/cpu.h
new file mode 100644
index 000000000..91c56d6bc
--- /dev/null
+++ b/qemu/target-alpha/cpu.h
@@ -0,0 +1,493 @@
+/*
+ *  Alpha emulation cpu definitions for qemu.
+ *
+ *  Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#if !defined (__CPU_ALPHA_H__)
+#define __CPU_ALPHA_H__
+
+#include "config.h"
+#include "qemu-common.h"
+
+#define TARGET_LONG_BITS 64
+#define ALIGNED_ONLY
+
+#define CPUArchState struct CPUAlphaState
+
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+#define ELF_MACHINE     EM_ALPHA
+
+#define ICACHE_LINE_SIZE 32
+#define DCACHE_LINE_SIZE 32
+
+#define TARGET_PAGE_BITS 13
+
+#ifdef CONFIG_USER_ONLY
+/* ??? The kernel likes to give addresses in high memory.  If the host has
+   more virtual address space than the guest, this can lead to impossible
+   allocations.  Honor the long-standing assumption that only kernel addrs
+   are negative, but otherwise allow allocations anywhere.  This could lead
+   to tricky emulation problems for programs doing tagged addressing, but
+   that's far fewer than encounter the impossible allocation problem.  */
+#define TARGET_PHYS_ADDR_SPACE_BITS  63
+#define TARGET_VIRT_ADDR_SPACE_BITS  63
+#else
+/* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44.  */
+#define TARGET_PHYS_ADDR_SPACE_BITS  44
+#define TARGET_VIRT_ADDR_SPACE_BITS  (30 + TARGET_PAGE_BITS)
+#endif
+
+/* Alpha major type */
+enum {
+    ALPHA_EV3  = 1,
+    ALPHA_EV4  = 2,
+    ALPHA_SIM  = 3,
+    ALPHA_LCA  = 4,
+    ALPHA_EV5  = 5, /* 21164 */
+    ALPHA_EV45 = 6, /* 21064A */
+    ALPHA_EV56 = 7, /* 21164A */
+};
+
+/* EV4 minor type */
+enum {
+    ALPHA_EV4_2 = 0,
+    ALPHA_EV4_3 = 1,
+};
+
+/* LCA minor type */
+enum {
+    ALPHA_LCA_1 = 1, /* 21066 */
+    ALPHA_LCA_2 = 2, /* 20166 */
+    ALPHA_LCA_3 = 3, /* 21068 */
+    ALPHA_LCA_4 = 4, /* 21068 */
+    ALPHA_LCA_5 = 5, /* 21066A */
+    ALPHA_LCA_6 = 6, /* 21068A */
+};
+
+/* EV5 minor type */
+enum {
+    ALPHA_EV5_1 = 1, /* Rev BA, CA */
+    ALPHA_EV5_2 = 2, /* Rev DA, EA */
+    ALPHA_EV5_3 = 3, /* Pass 3 */
+    ALPHA_EV5_4 = 4, /* Pass 3.2 */
+    ALPHA_EV5_5 = 5, /* Pass 4 */
+};
+
+/* EV45 minor type */
+enum {
+    ALPHA_EV45_1 = 1, /* Pass 1 */
+    ALPHA_EV45_2 = 2, /* Pass 1.1 */
+    ALPHA_EV45_3 = 3, /* Pass 2 */
+};
+
+/* EV56 minor type */
+enum {
+    ALPHA_EV56_1 = 1, /* Pass 1 */
+    ALPHA_EV56_2 = 2, /* Pass 2 */
+};
+
+enum {
+    IMPLVER_2106x = 0, /* EV4, EV45 & LCA45 */
+    IMPLVER_21164 = 1, /* EV5, EV56 & PCA56 */
+    IMPLVER_21264 = 2, /* EV6, EV67 & EV68x */
+    IMPLVER_21364 = 3, /* EV7 & EV79 */
+};
+
+enum {
+    AMASK_BWX      = 0x00000001,
+    AMASK_FIX      = 0x00000002,
+    AMASK_CIX      = 0x00000004,
+    AMASK_MVI      = 0x00000100,
+    AMASK_TRAP     = 0x00000200,
+    AMASK_PREFETCH = 0x00001000,
+};
+
+enum {
+    VAX_ROUND_NORMAL = 0,
+    VAX_ROUND_CHOPPED,
+};
+
+enum {
+    IEEE_ROUND_NORMAL = 0,
+    IEEE_ROUND_DYNAMIC,
+    IEEE_ROUND_PLUS,
+    IEEE_ROUND_MINUS,
+    IEEE_ROUND_CHOPPED,
+};
+
+/* IEEE floating-point operations encoding */
+/* Trap mode */
+enum {
+    FP_TRAP_I   = 0x0,
+    FP_TRAP_U   = 0x1,
+    FP_TRAP_S   = 0x4,
+    FP_TRAP_SU  = 0x5,
+    FP_TRAP_SUI = 0x7,
+};
+
+/* Rounding mode */
+enum {
+    FP_ROUND_CHOPPED = 0x0,
+    FP_ROUND_MINUS   = 0x1,
+    FP_ROUND_NORMAL  = 0x2,
+    FP_ROUND_DYNAMIC = 0x3,
+};
+
+/* FPCR bits -- right-shifted 32 so we can use a uint32_t.  */
+#define FPCR_SUM            (1U << (63 - 32))
+#define FPCR_INED           (1U << (62 - 32))
+#define FPCR_UNFD           (1U << (61 - 32))
+#define FPCR_UNDZ           (1U << (60 - 32))
+#define FPCR_DYN_SHIFT      (58 - 32)
+#define FPCR_DYN_CHOPPED    (0U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_MINUS      (1U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_NORMAL     (2U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_PLUS       (3U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_MASK       (3U << FPCR_DYN_SHIFT)
+#define FPCR_IOV            (1U << (57 - 32))
+#define FPCR_INE            (1U << (56 - 32))
+#define FPCR_UNF            (1U << (55 - 32))
+#define FPCR_OVF            (1U << (54 - 32))
+#define FPCR_DZE            (1U << (53 - 32))
+#define FPCR_INV            (1U << (52 - 32))
+#define FPCR_OVFD           (1U << (51 - 32))
+#define FPCR_DZED           (1U << (50 - 32))
+#define FPCR_INVD           (1U << (49 - 32))
+#define FPCR_DNZ            (1U << (48 - 32))
+#define FPCR_DNOD           (1U << (47 - 32))
+#define FPCR_STATUS_MASK    (FPCR_IOV | FPCR_INE | FPCR_UNF \
+                             | FPCR_OVF | FPCR_DZE | FPCR_INV)
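[Note: the FPCR_DYN field above selects the dynamic rounding mode used by
/D-qualified instructions.  A quick standalone sketch of decoding it from the
architectural 64-bit FPCR; the helpers later in this patch keep only the high
32 bits, so these constants are pre-shifted by 32.  Not part of the patch:]

#include <stdint.h>
#include <stdio.h>

#define FPCR_DYN_SHIFT  (58 - 32)
#define FPCR_DYN_MASK   (3U << FPCR_DYN_SHIFT)

static const char *fpcr_dyn_name(uint64_t fpcr64)
{
    uint32_t fpcr = fpcr64 >> 32;   /* only bits 32..63 are kept */
    switch ((fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT) {
    case 0:  return "chopped (toward zero)";
    case 1:  return "minus infinity";
    case 2:  return "normal (nearest even)";
    default: return "plus infinity";
    }
}

int main(void)
{
    /* Value 2 in the DYN field selects round-to-nearest-even. */
    printf("%s\n", fpcr_dyn_name(2ull << 58));
    return 0;
}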
+
+/* The silly software trap enables implemented by the kernel emulation.
+   These are more or less architecturally required, since the real hardware
+   has read-as-zero bits in the FPCR when the features aren't implemented.
+   For the purposes of QEMU, we pretend the FPCR can hold everything.  */
+#define SWCR_TRAP_ENABLE_INV    (1U << 1)
+#define SWCR_TRAP_ENABLE_DZE    (1U << 2)
+#define SWCR_TRAP_ENABLE_OVF    (1U << 3)
+#define SWCR_TRAP_ENABLE_UNF    (1U << 4)
+#define SWCR_TRAP_ENABLE_INE    (1U << 5)
+#define SWCR_TRAP_ENABLE_DNO    (1U << 6)
+#define SWCR_TRAP_ENABLE_MASK   ((1U << 7) - (1U << 1))
+
+#define SWCR_MAP_DMZ            (1U << 12)
+#define SWCR_MAP_UMZ            (1U << 13)
+#define SWCR_MAP_MASK           (SWCR_MAP_DMZ | SWCR_MAP_UMZ)
+
+#define SWCR_STATUS_INV         (1U << 17)
+#define SWCR_STATUS_DZE         (1U << 18)
+#define SWCR_STATUS_OVF         (1U << 19)
+#define SWCR_STATUS_UNF         (1U << 20)
+#define SWCR_STATUS_INE         (1U << 21)
+#define SWCR_STATUS_DNO         (1U << 22)
+#define SWCR_STATUS_MASK        ((1U << 23) - (1U << 17))
+
+#define SWCR_MASK  (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK)
+
+/* MMU modes definitions */
+
+/* Alpha has 5 MMU modes: PALcode, kernel, executive, supervisor, and user.
+   The Unix PALcode only exposes the kernel and user modes; presumably
+   executive and supervisor are used by VMS.
+
+   PALcode itself uses physical mode for code and kernel mode for data;
+   there are PALmode instructions that can access data via physical mode
+   or via an os-installed "alternate mode", which is one of the 4 above.
+
+   QEMU does not currently properly distinguish between code/data when
+   looking up addresses.  To avoid having to address this issue, our
+   emulated PALcode will cheat and use the KSEG mapping for its code+data
+   rather than physical addresses.
+
+   Moreover, we're only emulating Unix PALcode, and not attempting VMS.
+
+   All of which allows us to drop all but kernel and user modes.
+   Elide the unused MMU modes to save space.  */
+
+#define NB_MMU_MODES 2
+
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_KERNEL_IDX   0
+#define MMU_USER_IDX     1
+
+typedef struct CPUAlphaState CPUAlphaState;
+
+struct CPUAlphaState {
+    uint64_t ir[31];
+    float64 fir[31];
+    uint64_t pc;
+    uint64_t unique;
+    uint64_t lock_addr;
+    uint64_t lock_st_addr;
+    uint64_t lock_value;
+
+    /* The FPCR, and disassembled portions thereof.  */
+    uint32_t fpcr;
+    uint32_t fpcr_exc_enable;
+    float_status fp_status;
+    uint8_t fpcr_dyn_round;
+    uint8_t fpcr_flush_to_zero;
+
+    /* The Internal Processor Registers.  Some of these we assume always
+       exist for use in user-mode.  */
+    uint8_t ps;
+    uint8_t intr_flag;
+    uint8_t pal_mode;
+    uint8_t fen;
+
+    uint32_t pcc_ofs;
+
+    /* These pass data from the exception logic in the translator and
+       helpers to the OS entry point.  This is used for both system
+       emulation and user-mode.  */
+    uint64_t trap_arg0;
+    uint64_t trap_arg1;
+    uint64_t trap_arg2;
+
+#if !defined(CONFIG_USER_ONLY)
+    /* The internal data required by our emulation of the Unix PALcode.  */
+    uint64_t exc_addr;
+    uint64_t palbr;
+    uint64_t ptbr;
+    uint64_t vptptr;
+    uint64_t sysval;
+    uint64_t usp;
+    uint64_t shadow[8];
+    uint64_t scratch[24];
+#endif
+
+    /* This alarm doesn't exist in real hardware; we wish it did.  */
+    uint64_t alarm_expire;
+
+    /* Those resources are used only in QEMU core */
+    CPU_COMMON
+
+    int error_code;
+
+    uint32_t features;
+    uint32_t amask;
+    int implver;
+};
+
+#define cpu_list alpha_cpu_list
+#define cpu_exec cpu_alpha_exec
+#define cpu_gen_code cpu_alpha_gen_code
+#define cpu_signal_handler cpu_alpha_signal_handler
+
+#include "exec/cpu-all.h"
+#include "cpu-qom.h"
+
+enum {
+    FEATURE_ASN    = 0x00000001,
+    FEATURE_SPS    = 0x00000002,
+    FEATURE_VIRBND = 0x00000004,
+    FEATURE_TBCHK  = 0x00000008,
+};
+
+enum {
+    EXCP_RESET,
+    EXCP_MCHK,
+    EXCP_SMP_INTERRUPT,
+    EXCP_CLK_INTERRUPT,
+    EXCP_DEV_INTERRUPT,
+    EXCP_MMFAULT,
+    EXCP_UNALIGN,
+    EXCP_OPCDEC,
+    EXCP_ARITH,
+    EXCP_FEN,
+    EXCP_CALL_PAL,
+    /* For Usermode emulation.  */
+    EXCP_STL_C,
+    EXCP_STQ_C,
+};
+
+/* Alpha-specific interrupt pending bits.  */
+#define CPU_INTERRUPT_TIMER     CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_SMP       CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_MCHK      CPU_INTERRUPT_TGT_EXT_2
+
+/* OSF/1 Page table bits.  */
+enum {
+    PTE_VALID = 0x0001,
+    PTE_FOR   = 0x0002,  /* used for page protection (fault on read) */
+    PTE_FOW   = 0x0004,  /* used for page protection (fault on write) */
+    PTE_FOE   = 0x0008,  /* used for page protection (fault on exec) */
+    PTE_ASM   = 0x0010,
+    PTE_KRE   = 0x0100,
+    PTE_URE   = 0x0200,
+    PTE_KWE   = 0x1000,
+    PTE_UWE   = 0x2000
+};
+
+/* Hardware interrupt (entInt) constants.  */
+enum {
+    INT_K_IP,
+    INT_K_CLK,
+    INT_K_MCHK,
+    INT_K_DEV,
+    INT_K_PERF,
+};
+
+/* Memory management (entMM) constants.  */
+enum {
+    MM_K_TNV,
+    MM_K_ACV,
+    MM_K_FOR,
+    MM_K_FOE,
+    MM_K_FOW
+};
+
+/* Arithmetic exception (entArith) constants.  */
+enum {
+    EXC_M_SWC = 1,      /* Software completion */
+    EXC_M_INV = 2,      /* Invalid operation */
+    EXC_M_DZE = 4,      /* Division by zero */
+    EXC_M_FOV = 8,      /* Overflow */
+    EXC_M_UNF = 16,     /* Underflow */
+    EXC_M_INE = 32,     /* Inexact result */
+    EXC_M_IOV = 64      /* Integer Overflow */
+};
+
+/* Processor status constants.  */
+enum {
+    /* Low 3 bits are interrupt mask level.  */
+    PS_INT_MASK = 7,
+
+    /* Bits 4 and 5 are the mmu mode.  The VMS PALcode uses all 4 modes;
+       The Unix PALcode only uses bit 4.  */
+    PS_USER_MODE = 8
+};
+
+static inline int cpu_mmu_index(CPUAlphaState *env)
+{
+    if (env->pal_mode) {
+        return MMU_KERNEL_IDX;
+    } else if (env->ps & PS_USER_MODE) {
+        return MMU_USER_IDX;
+    } else {
+        return MMU_KERNEL_IDX;
+    }
+}
+
+enum {
+    IR_V0   = 0,
+    IR_T0   = 1,
+    IR_T1   = 2,
+    IR_T2   = 3,
+    IR_T3   = 4,
+    IR_T4   = 5,
+    IR_T5   = 6,
+    IR_T6   = 7,
+    IR_T7   = 8,
+    IR_S0   = 9,
+    IR_S1   = 10,
+    IR_S2   = 11,
+    IR_S3   = 12,
+    IR_S4   = 13,
+    IR_S5   = 14,
+    IR_S6   = 15,
+    IR_FP   = IR_S6,
+    IR_A0   = 16,
+    IR_A1   = 17,
+    IR_A2   = 18,
+    IR_A3   = 19,
+    IR_A4   = 20,
+    IR_A5   = 21,
+    IR_T8   = 22,
+    IR_T9   = 23,
+    IR_T10  = 24,
+    IR_T11  = 25,
+    IR_RA   = 26,
+    IR_T12  = 27,
+    IR_PV   = IR_T12,
+    IR_AT   = 28,
+    IR_GP   = 29,
+    IR_SP   = 30,
+    IR_ZERO = 31,
+};
+
+void alpha_translate_init(void);
+
+AlphaCPU *cpu_alpha_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_alpha_init(cpu_model))
+
+void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+int cpu_alpha_exec(CPUState *cpu);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+   signal handlers to inform the virtual CPU of exceptions.  Non-zero
+   is returned if the signal was handled by the virtual CPU.  */
+int cpu_alpha_signal_handler(int host_signum, void *pinfo,
+                             void *puc);
+int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+                               int mmu_idx);
+void do_restore_state(CPUAlphaState *, uintptr_t retaddr);
+void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
+void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
+
+uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env);
+void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val);
+#ifndef CONFIG_USER_ONLY
+void swap_shadow_regs(CPUAlphaState *env);
+QEMU_NORETURN void alpha_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+                                               bool is_write, bool is_exec,
+                                               int unused, unsigned size);
+#endif
+
+/* Bits in TB->FLAGS that control how translation is processed.  */
+enum {
+    TB_FLAGS_PAL_MODE  = 1,
+    TB_FLAGS_FEN       = 2,
+    TB_FLAGS_USER_MODE = 8,
+
+    TB_FLAGS_AMASK_SHIFT = 4,
+    TB_FLAGS_AMASK_BWX = AMASK_BWX << TB_FLAGS_AMASK_SHIFT,
+    TB_FLAGS_AMASK_FIX = AMASK_FIX << TB_FLAGS_AMASK_SHIFT,
+    TB_FLAGS_AMASK_CIX = AMASK_CIX << TB_FLAGS_AMASK_SHIFT,
+    TB_FLAGS_AMASK_MVI = AMASK_MVI << TB_FLAGS_AMASK_SHIFT,
+    TB_FLAGS_AMASK_TRAP = AMASK_TRAP << TB_FLAGS_AMASK_SHIFT,
+    TB_FLAGS_AMASK_PREFETCH = AMASK_PREFETCH << TB_FLAGS_AMASK_SHIFT,
+};
+
+static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
+                                        target_ulong *cs_base, int *pflags)
+{
+    int flags = 0;
+
+    *pc = env->pc;
+    *cs_base = 0;
+
+    if (env->pal_mode) {
+        flags = TB_FLAGS_PAL_MODE;
+    } else {
+        flags = env->ps & PS_USER_MODE;
+    }
+    if (env->fen) {
+        flags |= TB_FLAGS_FEN;
+    }
+    flags |= env->amask << TB_FLAGS_AMASK_SHIFT;
+
+    *pflags = flags;
+}
+
+#include "exec/exec-all.h"
+
+#endif /* !defined (__CPU_ALPHA_H__) */
diff --git a/qemu/target-alpha/fpu_helper.c b/qemu/target-alpha/fpu_helper.c
new file mode 100644
index 000000000..b091aa842
--- /dev/null
+++ b/qemu/target-alpha/fpu_helper.c
@@ -0,0 +1,551 @@
+/*
+ *  Helpers for floating point instructions.
+ *
+ *  Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define FP_STATUS (env->fp_status)
+
+
+void helper_setroundmode(CPUAlphaState *env, uint32_t val)
+{
+    set_float_rounding_mode(val, &FP_STATUS);
+}
+
+void helper_setflushzero(CPUAlphaState *env, uint32_t val)
+{
+    set_flush_to_zero(val, &FP_STATUS);
+}
+
+#define CONVERT_BIT(X, SRC, DST) \
+    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
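[Note: CONVERT_BIT above moves a single flag bit from position SRC to position
DST purely with multiplies and divides by the power-of-two ratio between them.
A small standalone check of both directions, not part of the patch:]

#include <assert.h>

#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))

int main(void)
{
    /* Moving a set bit down: bit 5 -> bit 2. */
    assert(CONVERT_BIT(1u << 5, 1u << 5, 1u << 2) == 1u << 2);
    /* Moving a set bit up: bit 2 -> bit 5. */
    assert(CONVERT_BIT(1u << 2, 1u << 2, 1u << 5) == 1u << 5);
    /* A clear bit converts to zero in either direction. */
    assert(CONVERT_BIT(0, 1u << 2, 1u << 5) == 0);
    return 0;
}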
+
+static uint32_t soft_to_fpcr_exc(CPUAlphaState *env)
+{
+    uint8_t exc = get_float_exception_flags(&FP_STATUS);
+    uint32_t ret = 0;
+
+    if (unlikely(exc)) {
+        set_float_exception_flags(0, &FP_STATUS);
+        ret |= CONVERT_BIT(exc, float_flag_invalid, FPCR_INV);
+        ret |= CONVERT_BIT(exc, float_flag_divbyzero, FPCR_DZE);
+        ret |= CONVERT_BIT(exc, float_flag_overflow, FPCR_OVF);
+        ret |= CONVERT_BIT(exc, float_flag_underflow, FPCR_UNF);
+        ret |= CONVERT_BIT(exc, float_flag_inexact, FPCR_INE);
+    }
+
+    return ret;
+}
+
+static void fp_exc_raise1(CPUAlphaState *env, uintptr_t retaddr,
+                          uint32_t exc, uint32_t regno, uint32_t hw_exc)
+{
+    hw_exc |= CONVERT_BIT(exc, FPCR_INV, EXC_M_INV);
+    hw_exc |= CONVERT_BIT(exc, FPCR_DZE, EXC_M_DZE);
+    hw_exc |= CONVERT_BIT(exc, FPCR_OVF, EXC_M_FOV);
+    hw_exc |= CONVERT_BIT(exc, FPCR_UNF, EXC_M_UNF);
+    hw_exc |= CONVERT_BIT(exc, FPCR_INE, EXC_M_INE);
+    hw_exc |= CONVERT_BIT(exc, FPCR_IOV, EXC_M_IOV);
+
+    arith_excp(env, retaddr, hw_exc, 1ull << regno);
+}
+
+/* Raise exceptions for ieee fp insns without software completion.
+   In that case there are no exceptions that don't trap; the mask
+   doesn't apply.  */
+void helper_fp_exc_raise(CPUAlphaState *env, uint32_t ignore, uint32_t regno)
+{
+    uint32_t exc = env->error_code;
+    if (exc) {
+        env->fpcr |= exc;
+        exc &= ~ignore;
+        if (exc) {
+            fp_exc_raise1(env, GETPC(), exc, regno, 0);
+        }
+    }
+}
+
+/* Raise exceptions for ieee fp insns with software completion.  */
+void helper_fp_exc_raise_s(CPUAlphaState *env, uint32_t ignore, uint32_t regno)
+{
+    uint32_t exc = env->error_code & ~ignore;
+    if (exc) {
+        env->fpcr |= exc;
+        exc &= ~ignore;
+        if (exc) {
+            exc &= env->fpcr_exc_enable;
+            fp_exc_raise1(env, GETPC(), exc, regno, EXC_M_SWC);
+        }
+    }
+}
+
+/* Input handling without software completion.  Trap for all
+   non-finite numbers.  */
+void helper_ieee_input(CPUAlphaState *env, uint64_t val)
+{
+    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+    uint64_t frac = val & 0xfffffffffffffull;
+
+    if (exp == 0) {
+        /* Denormals without /S raise an exception.  */
+        if (frac != 0) {
+            arith_excp(env, GETPC(), EXC_M_INV, 0);
+        }
+    } else if (exp == 0x7ff) {
+        /* Infinity or NaN.  */
+        env->fpcr |= FPCR_INV;
+        arith_excp(env, GETPC(), EXC_M_INV, 0);
+    }
+}
+
+/* Similar, but does not trap for infinities.  Used for comparisons.  */
+void helper_ieee_input_cmp(CPUAlphaState *env, uint64_t val)
+{
+    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+    uint64_t frac = val & 0xfffffffffffffull;
+
+    if (exp == 0) {
+        /* Denormals without /S raise an exception.  */
+        if (frac != 0) {
+            arith_excp(env, GETPC(), EXC_M_INV, 0);
+        }
+    } else if (exp == 0x7ff && frac) {
+        /* NaN.  */
+        env->fpcr |= FPCR_INV;
+        arith_excp(env, GETPC(), EXC_M_INV, 0);
+    }
+}
+
+/* Input handling with software completion.  Trap for denorms, unless DNZ
+   is set.  If we try to support DNOD (which none of the produced hardware
+   did, AFAICS), we'll need to suppress the trap when FPCR.DNOD is set;
+   then the code downstream of that will need to cope with denorms sans
+   flush_input_to_zero.  Most of it should work sanely, but there's
+   nothing to compare with.  */
+void helper_ieee_input_s(CPUAlphaState *env, uint64_t val)
+{
+    if (unlikely(2 * val - 1 < 0x1fffffffffffffull)
+        && !env->fp_status.flush_inputs_to_zero) {
+        arith_excp(env, GETPC(), EXC_M_INV | EXC_M_SWC, 0);
+    }
+}
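[Note: the test "2 * val - 1 < 0x1fffffffffffffull" in helper_ieee_input_s is a
compact way of asking "is this a nonzero double with a zero exponent field,
i.e. a denormal?": doubling discards the sign bit, and the -1 wraps an input of
zero to UINT64_MAX so both zeroes fail the test.  A standalone sketch checking
the trick against the obvious field test; assumption: plain host C, not part of
the patch:]

#include <assert.h>
#include <stdint.h>

static int is_denormal_fast(uint64_t val)
{
    /* The trick used by helper_ieee_input_s above. */
    return 2 * val - 1 < 0x1fffffffffffffull;
}

static int is_denormal_fields(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;
    return exp == 0 && frac != 0;
}

int main(void)
{
    uint64_t samples[] = {
        0x0000000000000000ull,  /* +0.0 */
        0x8000000000000000ull,  /* -0.0 */
        0x0000000000000001ull,  /* smallest denormal */
        0x800fffffffffffffull,  /* largest-magnitude negative denormal */
        0x0010000000000000ull,  /* smallest normal */
        0x3ff0000000000000ull,  /* 1.0 */
        0x7ff0000000000000ull,  /* +inf */
    };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        assert(is_denormal_fast(samples[i]) == is_denormal_fields(samples[i]));
    }
    return 0;
}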
+
+/* S floating (single) */
+
+/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
+static inline uint64_t float32_to_s_int(uint32_t fi)
+{
+    uint32_t frac = fi & 0x7fffff;
+    uint32_t sign = fi >> 31;
+    uint32_t exp_msb = (fi >> 30) & 1;
+    uint32_t exp_low = (fi >> 23) & 0x7f;
+    uint32_t exp;
+
+    exp = (exp_msb << 10) | exp_low;
+    if (exp_msb) {
+        if (exp_low == 0x7f) {
+            exp = 0x7ff;
+        }
+    } else {
+        if (exp_low != 0x00) {
+            exp |= 0x380;
+        }
+    }
+
+    return (((uint64_t)sign << 63)
+            | ((uint64_t)exp << 52)
+            | ((uint64_t)frac << 29));
+}
+
+static inline uint64_t float32_to_s(float32 fa)
+{
+    CPU_FloatU a;
+    a.f = fa;
+    return float32_to_s_int(a.l);
+}
+
+static inline uint32_t s_to_float32_int(uint64_t a)
+{
+    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
+}
+
+static inline float32 s_to_float32(uint64_t a)
+{
+    CPU_FloatU r;
+    r.l = s_to_float32_int(a);
+    return r.f;
+}
+
+uint32_t helper_s_to_memory(uint64_t a)
+{
+    return s_to_float32_int(a);
+}
+
+uint64_t helper_memory_to_s(uint32_t a)
+{
+    return float32_to_s_int(a);
+}
+
+uint64_t helper_adds(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+    fr = float32_add(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float32_to_s(fr);
+}
+
+uint64_t helper_subs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+    fr = float32_sub(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float32_to_s(fr);
+}
+
+uint64_t helper_muls(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+    fr = float32_mul(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float32_to_s(fr);
+}
+
+uint64_t helper_divs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+    fr = float32_div(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float32_to_s(fr);
+}
+
+uint64_t helper_sqrts(CPUAlphaState *env, uint64_t a)
+{
+    float32 fa, fr;
+
+    fa = s_to_float32(a);
+    fr = float32_sqrt(fa, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float32_to_s(fr);
+}
+
+
+/* T floating (double) */
+static inline float64 t_to_float64(uint64_t a)
+{
+    /* Memory format is the same as float64 */
+    CPU_DoubleU r;
+    r.ll = a;
+    return r.d;
+}
+
+static inline uint64_t float64_to_t(float64 fa)
+{
+    /* Memory format is the same as float64 */
+    CPU_DoubleU r;
+    r.d = fa;
+    return r.ll;
+}
+
+uint64_t helper_addt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+    fr = float64_add(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float64_to_t(fr);
+}
+
+uint64_t helper_subt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+    fr = float64_sub(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float64_to_t(fr);
+}
+
+uint64_t helper_mult(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+    fr = float64_mul(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float64_to_t(fr);
+}
+
+uint64_t helper_divt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+    fr = float64_div(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float64_to_t(fr);
+}
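[Note: float32_to_s_int above widens the 8-bit single-precision exponent to the
11-bit register format, re-biasing via the 0x380 merge exactly as the
hardware's load-S expansion does.  Worked example: 1.0f is 0x3F800000, so
exp_msb = 0 and exp_low = 0x7f, giving exp = 0x07f | 0x380 = 0x3ff and the
result 0x3FF0000000000000, which is the T-format (double) encoding of 1.0.
A quick host-side check, standalone and not part of the patch:]

#include <assert.h>
#include <stdint.h>

static uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp = (exp_msb << 10) | exp_low;

    if (exp_msb) {
        if (exp_low == 0x7f) {
            exp = 0x7ff;            /* Inf/NaN stays Inf/NaN. */
        }
    } else if (exp_low != 0x00) {
        exp |= 0x380;               /* Re-bias 127-based exponent to 1023. */
    }
    return ((uint64_t)sign << 63) | ((uint64_t)exp << 52) | ((uint64_t)frac << 29);
}

int main(void)
{
    assert(float32_to_s_int(0x3F800000u) == 0x3FF0000000000000ull); /* 1.0 */
    assert(float32_to_s_int(0xC0000000u) == 0xC000000000000000ull); /* -2.0 */
    return 0;
}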
+
+uint64_t helper_sqrtt(CPUAlphaState *env, uint64_t a)
+{
+    float64 fa, fr;
+
+    fa = t_to_float64(a);
+    fr = float64_sqrt(fa, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float64_to_t(fr);
+}
+
+/* Comparisons */
+uint64_t helper_cmptun(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return ret;
+}
+
+uint64_t helper_cmpteq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return ret;
+}
+
+uint64_t helper_cmptle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_le(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return ret;
+}
+
+uint64_t helper_cmptlt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_lt(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return ret;
+}
+
+/* Floating point format conversion */
+uint64_t helper_cvtts(CPUAlphaState *env, uint64_t a)
+{
+    float64 fa;
+    float32 fr;
+
+    fa = t_to_float64(a);
+    fr = float64_to_float32(fa, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float32_to_s(fr);
+}
+
+uint64_t helper_cvtst(CPUAlphaState *env, uint64_t a)
+{
+    float32 fa;
+    float64 fr;
+
+    fa = s_to_float32(a);
+    fr = float32_to_float64(fa, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float64_to_t(fr);
+}
+
+uint64_t helper_cvtqs(CPUAlphaState *env, uint64_t a)
+{
+    float32 fr = int64_to_float32(a, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+
+    return float32_to_s(fr);
+}
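[Note: the comparison helpers return 0x4000000000000000 for "true", which is
the T-format bit pattern of 2.0; a false result is +0.0.  Also observe that
cmptle/cmptlt use the signaling float64_le/float64_lt variants, so an unordered
operand raises invalid, while cmptun/cmpteq use the _quiet forms.  A trivial
standalone check of the encoding, not part of the patch:]

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    /* 0x4000000000000000 is IEEE-754 double 2.0, the Alpha "true" value. */
    uint64_t bits = 0x4000000000000000ull;
    double d;
    memcpy(&d, &bits, sizeof(d));
    assert(d == 2.0);
    return 0;
}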
+
+/* Implement float64 to uint64 conversion without saturation -- we must
+   supply the truncated result.  This behaviour is used by the compiler
+   to get unsigned conversion for free with the same instruction.  */
+
+static uint64_t do_cvttq(CPUAlphaState *env, uint64_t a, int roundmode)
+{
+    uint64_t frac, ret = 0;
+    uint32_t exp, sign, exc = 0;
+    int shift;
+
+    sign = (a >> 63);
+    exp = (uint32_t)(a >> 52) & 0x7ff;
+    frac = a & 0xfffffffffffffull;
+
+    if (exp == 0) {
+        if (unlikely(frac != 0) && !env->fp_status.flush_inputs_to_zero) {
+            goto do_underflow;
+        }
+    } else if (exp == 0x7ff) {
+        exc = FPCR_INV;
+    } else {
+        /* Restore implicit bit.  */
+        frac |= 0x10000000000000ull;
+
+        shift = exp - 1023 - 52;
+        if (shift >= 0) {
+            /* In this case the number is so large that we must shift
+               the fraction left.  There is no rounding to do.  */
+            if (shift < 64) {
+                ret = frac << shift;
+            }
+            /* Check for overflow.  Note the special case of -0x1p63.  */
+            if (shift >= 11 && a != 0xC3E0000000000000ull) {
+                exc = FPCR_IOV | FPCR_INE;
+            }
+        } else {
+            uint64_t round;
+
+            /* In this case the number is smaller than the fraction as
+               represented by the 52 bit number.  Here we must think
+               about rounding the result.  Handle this by shifting the
+               fractional part of the number into the high bits of ROUND.
+               This will let us efficiently handle round-to-nearest.  */
+            shift = -shift;
+            if (shift < 63) {
+                ret = frac >> shift;
+                round = frac << (64 - shift);
+            } else {
+                /* The exponent is so small we shift out everything.
+                   Leave a sticky bit for proper rounding below.  */
+            do_underflow:
+                round = 1;
+            }
+
+            if (round) {
+                exc = FPCR_INE;
+                switch (roundmode) {
+                case float_round_nearest_even:
+                    if (round == (1ull << 63)) {
+                        /* Fraction is exactly 0.5; round to even.  */
+                        ret += (ret & 1);
+                    } else if (round > (1ull << 63)) {
+                        ret += 1;
+                    }
+                    break;
+                case float_round_to_zero:
+                    break;
+                case float_round_up:
+                    ret += 1 - sign;
+                    break;
+                case float_round_down:
+                    ret += sign;
+                    break;
+                }
+            }
+        }
+        if (sign) {
+            ret = -ret;
+        }
+    }
+    env->error_code = exc;
+
+    return ret;
+}
+
+uint64_t helper_cvttq(CPUAlphaState *env, uint64_t a)
+{
+    return do_cvttq(env, a, FP_STATUS.float_rounding_mode);
+}
+
+uint64_t helper_cvttq_c(CPUAlphaState *env, uint64_t a)
+{
+    return do_cvttq(env, a, float_round_to_zero);
+}
+
+uint64_t helper_cvtqt(CPUAlphaState *env, uint64_t a)
+{
+    float64 fr = int64_to_float64(a, &FP_STATUS);
+    env->error_code = soft_to_fpcr_exc(env);
+    return float64_to_t(fr);
+}
+
+uint64_t helper_cvtql(CPUAlphaState *env, uint64_t val)
+{
+    uint32_t exc = 0;
+    if (val != (int32_t)val) {
+        exc = FPCR_IOV | FPCR_INE;
+    }
+    env->error_code = exc;
+
+    return ((val & 0xc0000000) << 32) | ((val & 0x3fffffff) << 29);
+}
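[Note: in do_cvttq the bits shifted out of the fraction are left-aligned in
"round", so round == 1<<63 means the discarded part is exactly one half, and
"ret += (ret & 1)" implements ties-to-even.  A standalone sketch of the same
idea, illustrative only and not part of the patch; shift must be in 1..63:]

#include <assert.h>
#include <stdint.h>

/* Round x / 2^shift to nearest, ties to even, mirroring do_cvttq. */
static uint64_t rne_shift(uint64_t x, unsigned shift)
{
    uint64_t ret = x >> shift;
    uint64_t round = x << (64 - shift);   /* discarded bits, left-aligned */

    if (round) {
        if (round == 1ull << 63) {
            ret += ret & 1;               /* exactly .5: round to even */
        } else if (round > 1ull << 63) {
            ret += 1;                     /* more than .5: round up */
        }
    }
    return ret;
}

int main(void)
{
    assert(rne_shift(5, 1) == 2);    /* 2.5  -> 2 (even) */
    assert(rne_shift(7, 1) == 4);    /* 3.5  -> 4 (even) */
    assert(rne_shift(9, 2) == 2);    /* 2.25 -> 2 */
    assert(rne_shift(11, 2) == 3);   /* 2.75 -> 3 */
    return 0;
}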
diff --git a/qemu/target-alpha/gdbstub.c b/qemu/target-alpha/gdbstub.c
new file mode 100644
index 000000000..980f140e7
--- /dev/null
+++ b/qemu/target-alpha/gdbstub.c
@@ -0,0 +1,93 @@
+/*
+ * Alpha gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "config.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+
+int alpha_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+    uint64_t val;
+    CPU_DoubleU d;
+
+    switch (n) {
+    case 0 ... 30:
+        val = env->ir[n];
+        break;
+    case 32 ... 62:
+        d.d = env->fir[n - 32];
+        val = d.ll;
+        break;
+    case 63:
+        val = cpu_alpha_load_fpcr(env);
+        break;
+    case 64:
+        val = env->pc;
+        break;
+    case 66:
+        val = env->unique;
+        break;
+    case 31:
+    case 65:
+        /* 31 really is the zero register; 65 is unassigned in the
+           gdb protocol, but is still required to occupy 8 bytes.  */
+        val = 0;
+        break;
+    default:
+        return 0;
+    }
+    return gdb_get_regl(mem_buf, val);
+}
+
+int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+    target_ulong tmp = ldtul_p(mem_buf);
+    CPU_DoubleU d;
+
+    switch (n) {
+    case 0 ... 30:
+        env->ir[n] = tmp;
+        break;
+    case 32 ... 62:
+        d.ll = tmp;
+        env->fir[n - 32] = d.d;
+        break;
+    case 63:
+        cpu_alpha_store_fpcr(env, tmp);
+        break;
+    case 64:
+        env->pc = tmp;
+        break;
+    case 66:
+        env->unique = tmp;
+        break;
+    case 31:
+    case 65:
+        /* 31 really is the zero register; 65 is unassigned in the
+           gdb protocol, but is still required to occupy 8 bytes.  */
+        break;
+    default:
+        return 0;
+    }
+    return 8;
+}
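[Note: summary of the gdb remote-protocol register numbering implemented above,
matching cc->gdb_num_core_regs = 67 set in cpu.c:]

/* Register number -> Alpha state, as used by both gdbstub functions:
 *
 *   0 .. 30   integer registers ir[0..30]
 *   31        architectural zero register (reads as 0, writes ignored)
 *   32 .. 62  floating-point registers fir[0..30]
 *   63        FPCR
 *   64        PC
 *   65        unassigned in the protocol, but still occupies 8 bytes
 *   66        UNIQUE (per-thread value register)
 */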
diff --git a/qemu/target-alpha/helper.c b/qemu/target-alpha/helper.c
new file mode 100644
index 000000000..46b8ef914
--- /dev/null
+++ b/qemu/target-alpha/helper.c
@@ -0,0 +1,498 @@
+/*
+ *  Alpha emulation cpu helpers for qemu.
+ *
+ *  Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "fpu/softfloat.h"
+#include "exec/helper-proto.h"
+
+
+#define CONVERT_BIT(X, SRC, DST) \
+    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
+
+uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
+{
+    return (uint64_t)env->fpcr << 32;
+}
+
+void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
+{
+    uint32_t fpcr = val >> 32;
+    uint32_t t = 0;
+
+    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
+    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
+    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
+    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
+    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);
+
+    env->fpcr = fpcr;
+    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;
+
+    switch (fpcr & FPCR_DYN_MASK) {
+    case FPCR_DYN_NORMAL:
+    default:
+        t = float_round_nearest_even;
+        break;
+    case FPCR_DYN_CHOPPED:
+        t = float_round_to_zero;
+        break;
+    case FPCR_DYN_MINUS:
+        t = float_round_down;
+        break;
+    case FPCR_DYN_PLUS:
+        t = float_round_up;
+        break;
+    }
+    env->fpcr_dyn_round = t;
+
+    env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
+    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
+}
+
+uint64_t helper_load_fpcr(CPUAlphaState *env)
+{
+    return cpu_alpha_load_fpcr(env);
+}
+
+void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
+{
+    cpu_alpha_store_fpcr(env, val);
+}
+
+#if defined(CONFIG_USER_ONLY)
+int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+                               int rw, int mmu_idx)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+
+    cs->exception_index = EXCP_MMFAULT;
+    cpu->env.trap_arg0 = address;
+    return 1;
+}
+#else
+void swap_shadow_regs(CPUAlphaState *env)
+{
+    uint64_t i0, i1, i2, i3, i4, i5, i6, i7;
+
+    i0 = env->ir[8];
+    i1 = env->ir[9];
+    i2 = env->ir[10];
+    i3 = env->ir[11];
+    i4 = env->ir[12];
+    i5 = env->ir[13];
+    i6 = env->ir[14];
+    i7 = env->ir[25];
+
+    env->ir[8] = env->shadow[0];
+    env->ir[9] = env->shadow[1];
+    env->ir[10] = env->shadow[2];
+    env->ir[11] = env->shadow[3];
+    env->ir[12] = env->shadow[4];
+    env->ir[13] = env->shadow[5];
+    env->ir[14] = env->shadow[6];
+    env->ir[25] = env->shadow[7];
+
+    env->shadow[0] = i0;
+    env->shadow[1] = i1;
+    env->shadow[2] = i2;
+    env->shadow[3] = i3;
+    env->shadow[4] = i4;
+    env->shadow[5] = i5;
+    env->shadow[6] = i6;
+    env->shadow[7] = i7;
+}
+
+/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
+static int get_physical_address(CPUAlphaState *env, target_ulong addr,
+                                int prot_need, int mmu_idx,
+                                target_ulong *pphys, int *pprot)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    target_long saddr = addr;
+    target_ulong phys = 0;
+    target_ulong L1pte, L2pte, L3pte;
+    target_ulong pt, index;
+    int prot = 0;
+    int ret = MM_K_ACV;
+
+    /* Ensure that the virtual address is properly sign-extended from
+       the last implemented virtual address bit.  */
+    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
+        goto exit;
+    }
+
+    /* Translate the superpage.  */
+    /* ??? When we do more than emulate Unix PALcode, we'll need to
+       determine which KSEG is actually active.  */
+    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
+        /* User-space cannot access KSEG addresses.  */
+        if (mmu_idx != MMU_KERNEL_IDX) {
+            goto exit;
+        }
+
+        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
+           We would not do this if the 48-bit KSEG is enabled.  */
+        phys = saddr & ((1ull << 40) - 1);
+        phys |= (saddr & (1ull << 40)) << 3;
+
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        ret = -1;
+        goto exit;
+    }
+
+    /* Interpret the page table exactly like PALcode does.  */
+
+    pt = env->ptbr;
+
+    /* L1 page table read.  */
+    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
+    L1pte = ldq_phys(cs->as, pt + index*8);
+
+    if (unlikely((L1pte & PTE_VALID) == 0)) {
+        ret = MM_K_TNV;
+        goto exit;
+    }
+    if (unlikely((L1pte & PTE_KRE) == 0)) {
+        goto exit;
+    }
+    pt = L1pte >> 32 << TARGET_PAGE_BITS;
+
+    /* L2 page table read.  */
+    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
+    L2pte = ldq_phys(cs->as, pt + index*8);
+
+    if (unlikely((L2pte & PTE_VALID) == 0)) {
+        ret = MM_K_TNV;
+        goto exit;
+    }
+    if (unlikely((L2pte & PTE_KRE) == 0)) {
+        goto exit;
+    }
+    pt = L2pte >> 32 << TARGET_PAGE_BITS;
+
+    /* L3 page table read.  */
+    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
+    L3pte = ldq_phys(cs->as, pt + index*8);
+
+    phys = L3pte >> 32 << TARGET_PAGE_BITS;
+    if (unlikely((L3pte & PTE_VALID) == 0)) {
+        ret = MM_K_TNV;
+        goto exit;
+    }
+
+#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
+# error page bits out of date
+#endif
+
+    /* Check access violations.  */
+    if (L3pte & (PTE_KRE << mmu_idx)) {
+        prot |= PAGE_READ | PAGE_EXEC;
+    }
+    if (L3pte & (PTE_KWE << mmu_idx)) {
+        prot |= PAGE_WRITE;
+    }
+    if (unlikely((prot & prot_need) == 0 && prot_need)) {
+        goto exit;
+    }
+
+    /* Check fault-on-operation violations.  */
+    prot &= ~(L3pte >> 1);
+    ret = -1;
+    if (unlikely((prot & prot_need) == 0)) {
+        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
+               prot_need & PAGE_WRITE ? MM_K_FOW :
+               prot_need & PAGE_READ ? MM_K_FOR : -1);
+    }
+
+ exit:
+    *pphys = phys;
+    *pprot = prot;
+    return ret;
+}
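[Note: with 8 KB pages (TARGET_PAGE_BITS = 13) each table level above indexes
10 bits, so the three-level walk covers 10 + 10 + 10 + 13 = 43 bits of virtual
address, matching TARGET_VIRT_ADDR_SPACE_BITS = 30 + TARGET_PAGE_BITS from
cpu.h.  A standalone sketch of the index extraction, mirroring the code above;
the sample address is arbitrary:]

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 13

int main(void)
{
    uint64_t addr = 0x000001fc00003fc6ull;   /* arbitrary example VA */

    uint64_t l1  = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    uint64_t l2  = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    uint64_t l3  = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    uint64_t off = addr & ((1u << TARGET_PAGE_BITS) - 1);

    /* Each level has 1024 entries; the page offset is 13 bits. */
    assert(l1 < 1024 && l2 < 1024 && l3 < 1024 && off < 8192);
    return 0;
}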
+
+hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    target_ulong phys;
+    int prot, fail;
+
+    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
+    return (fail >= 0 ? -1 : phys);
+}
+
+int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
+                               int mmu_idx)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+    target_ulong phys;
+    int prot, fail;
+
+    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
+    if (unlikely(fail >= 0)) {
+        cs->exception_index = EXCP_MMFAULT;
+        env->trap_arg0 = addr;
+        env->trap_arg1 = fail;
+        env->trap_arg2 = (rw == 2 ? -1 : rw);
+        return 1;
+    }
+
+    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+                 prot, mmu_idx, TARGET_PAGE_SIZE);
+    return 0;
+}
+#endif /* USER_ONLY */
+
+void alpha_cpu_do_interrupt(CPUState *cs)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+    int i = cs->exception_index;
+
+    if (qemu_loglevel_mask(CPU_LOG_INT)) {
+        static int count;
+        const char *name = "<unknown>";
+
+        switch (i) {
+        case EXCP_RESET:
+            name = "reset";
+            break;
+        case EXCP_MCHK:
+            name = "mchk";
+            break;
+        case EXCP_SMP_INTERRUPT:
+            name = "smp_interrupt";
+            break;
+        case EXCP_CLK_INTERRUPT:
+            name = "clk_interrupt";
+            break;
+        case EXCP_DEV_INTERRUPT:
+            name = "dev_interrupt";
+            break;
+        case EXCP_MMFAULT:
+            name = "mmfault";
+            break;
+        case EXCP_UNALIGN:
+            name = "unalign";
+            break;
+        case EXCP_OPCDEC:
+            name = "opcdec";
+            break;
+        case EXCP_ARITH:
+            name = "arith";
+            break;
+        case EXCP_FEN:
+            name = "fen";
+            break;
+        case EXCP_CALL_PAL:
+            name = "call_pal";
+            break;
+        case EXCP_STL_C:
+            name = "stl_c";
+            break;
+        case EXCP_STQ_C:
+            name = "stq_c";
+            break;
+        }
+        qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n",
+                 ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
+    }
+
+    cs->exception_index = -1;
+
+#if !defined(CONFIG_USER_ONLY)
+    switch (i) {
+    case EXCP_RESET:
+        i = 0x0000;
+        break;
+    case EXCP_MCHK:
+        i = 0x0080;
+        break;
+    case EXCP_SMP_INTERRUPT:
+        i = 0x0100;
+        break;
+    case EXCP_CLK_INTERRUPT:
+        i = 0x0180;
+        break;
+    case EXCP_DEV_INTERRUPT:
+        i = 0x0200;
+        break;
+    case EXCP_MMFAULT:
+        i = 0x0280;
+        break;
+    case EXCP_UNALIGN:
+        i = 0x0300;
+        break;
+    case EXCP_OPCDEC:
+        i = 0x0380;
+        break;
+    case EXCP_ARITH:
+        i = 0x0400;
+        break;
+    case EXCP_FEN:
+        i = 0x0480;
+        break;
+    case EXCP_CALL_PAL:
+        i = env->error_code;
+        /* There are 64 entry points for both privileged and unprivileged,
+           with bit 0x80 indicating unprivileged.  Each entry point gets
+           64 bytes to do its job.  */
+        if (i & 0x80) {
+            i = 0x2000 + (i - 0x80) * 64;
+        } else {
+            i = 0x1000 + i * 64;
+        }
+        break;
+    default:
+        cpu_abort(cs, "Unhandled CPU exception");
+    }
+
+    /* Remember where the exception happened.  Emulate real hardware in
+       that the low bit of the PC indicates PALmode.  */
+    env->exc_addr = env->pc | env->pal_mode;
+
+    /* Continue execution at the PALcode entry point.  */
+    env->pc = env->palbr + i;
+
+    /* Switch to PALmode.  */
+    if (!env->pal_mode) {
+        env->pal_mode = 1;
+        swap_shadow_regs(env);
+    }
+#endif /* !USER_ONLY */
+}
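[Note: the CALL_PAL dispatch above gives each of the 64 privileged and 64
unprivileged entry points 64 bytes of PALcode: privileged calls 0x00-0x3F land
at palbr + 0x1000 + i*64, unprivileged calls 0x80-0xBF at
palbr + 0x2000 + (i - 0x80)*64.  For example, the OSF/1 callsys entry
(CALL_PAL 0x83) dispatches to offset 0x2000 + 3*64 = 0x20C0.  A standalone
check, not part of the patch:]

#include <assert.h>
#include <stdint.h>

static uint64_t pal_entry_offset(int i)
{
    /* Mirrors the CALL_PAL dispatch in alpha_cpu_do_interrupt above. */
    return (i & 0x80) ? 0x2000 + (i - 0x80) * 64 : 0x1000 + i * 64;
}

int main(void)
{
    assert(pal_entry_offset(0x00) == 0x1000);   /* halt (privileged) */
    assert(pal_entry_offset(0x83) == 0x20c0);   /* callsys (unprivileged) */
    return 0;
}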
+
+bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+    int idx = -1;
+
+    /* We never take interrupts while in PALmode.  */
+    if (env->pal_mode) {
+        return false;
+    }
+
+    /* Fall through the switch, collecting the highest priority
+       interrupt that isn't masked by the processor status IPL.  */
+    /* ??? This hard-codes the OSF/1 interrupt levels.  */
+    switch (env->ps & PS_INT_MASK) {
+    case 0 ... 3:
+        if (interrupt_request & CPU_INTERRUPT_HARD) {
+            idx = EXCP_DEV_INTERRUPT;
+        }
+        /* FALLTHRU */
+    case 4:
+        if (interrupt_request & CPU_INTERRUPT_TIMER) {
+            idx = EXCP_CLK_INTERRUPT;
+        }
+        /* FALLTHRU */
+    case 5:
+        if (interrupt_request & CPU_INTERRUPT_SMP) {
+            idx = EXCP_SMP_INTERRUPT;
+        }
+        /* FALLTHRU */
+    case 6:
+        if (interrupt_request & CPU_INTERRUPT_MCHK) {
+            idx = EXCP_MCHK;
+        }
+    }
+    if (idx >= 0) {
+        cs->exception_index = idx;
+        env->error_code = 0;
+        alpha_cpu_do_interrupt(cs);
+        return true;
+    }
+    return false;
+}
+
+void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+                          int flags)
+{
+    static const char *linux_reg_names[] = {
+        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
+        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
+        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
+        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
+    };
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+    int i;
+
+    cpu_fprintf(f, "     PC  " TARGET_FMT_lx "      PS  %02x\n",
+                env->pc, env->ps);
+    for (i = 0; i < 31; i++) {
+        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
+                    linux_reg_names[i], env->ir[i]);
+        if ((i % 3) == 2)
+            cpu_fprintf(f, "\n");
+    }
+
+    cpu_fprintf(f, "lock_a   " TARGET_FMT_lx " lock_v   " TARGET_FMT_lx "\n",
+                env->lock_addr, env->lock_value);
+
+    for (i = 0; i < 31; i++) {
+        cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
+                    *((uint64_t *)(&env->fir[i])));
+        if ((i % 3) == 2)
+            cpu_fprintf(f, "\n");
+    }
+    cpu_fprintf(f, "\n");
+}
+
+/* This should only be called from translate, via gen_excp.
+   We expect that ENV->PC has already been updated.  */
+void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
+{
+    AlphaCPU *cpu = alpha_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    cs->exception_index = excp;
+    env->error_code = error;
+    cpu_loop_exit(cs);
+}
+
+/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
+void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
+                                int excp, int error)
+{
+    AlphaCPU *cpu = alpha_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    cs->exception_index = excp;
+    env->error_code = error;
+    if (retaddr) {
+        cpu_restore_state(cs, retaddr);
+        /* Floating-point exceptions (our only users) point to the next PC.  */
+        env->pc += 4;
+    }
+    cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
+                              int exc, uint64_t mask)
+{
+    env->trap_arg0 = exc;
+    env->trap_arg1 = mask;
+    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
+}
diff --git a/qemu/target-alpha/helper.h b/qemu/target-alpha/helper.h
new file mode 100644
index 000000000..d221f0d7d
--- /dev/null
+++ b/qemu/target-alpha/helper.h
@@ -0,0 +1,115 @@
+DEF_HELPER_3(excp, noreturn, env, int, int)
+DEF_HELPER_FLAGS_1(load_pcc, TCG_CALL_NO_RWG_SE, i64, env)
+
+DEF_HELPER_FLAGS_3(check_overflow, TCG_CALL_NO_WG, void, env, i64, i64)
+
+DEF_HELPER_FLAGS_1(ctpop, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(ctlz, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(cttz, TCG_CALL_NO_RWG_SE, i64, i64)
+
+DEF_HELPER_FLAGS_2(zap, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(zapnot, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(cmpbge, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(minub8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minsb8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minuw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minsw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxub8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxsb8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxuw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxsw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(perr, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(pklb, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(pkwb, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(unpkbl, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(unpkbw, TCG_CALL_NO_RWG_SE, i64, i64)
+
+DEF_HELPER_FLAGS_1(load_fpcr, TCG_CALL_NO_RWG_SE, i64, env)
+DEF_HELPER_FLAGS_2(store_fpcr, TCG_CALL_NO_RWG, void, env, i64)
+
+DEF_HELPER_FLAGS_1(f_to_memory, TCG_CALL_NO_RWG_SE, i32, i64)
+DEF_HELPER_FLAGS_1(memory_to_f, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_3(addf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtf, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_1(g_to_memory, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(memory_to_g, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_3(addg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtg, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_1(s_to_memory, TCG_CALL_NO_RWG_SE, i32, i64)
+DEF_HELPER_FLAGS_1(memory_to_s, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_3(adds, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(muls, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrts, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(addt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mult, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtt, TCG_CALL_NO_RWG, i64, env, i64)
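[Note: each DEF_HELPER entry is macro-expanded by QEMU's helper-head machinery
into a C prototype (the name gains a helper_ prefix) plus TCG call metadata;
i64 maps to uint64_t and env to the CPU state pointer.  So the addt entry above
corresponds to the definition already seen in fpu_helper.c.  An illustrative
reading, not a literal macro expansion:]

/* DEF_HELPER_FLAGS_3(addt, TCG_CALL_NO_RWG, i64, env, i64, i64)
 * declares, in effect:
 *
 *     uint64_t helper_addt(CPUAlphaState *env, uint64_t a, uint64_t b);
 *
 * TCG_CALL_NO_RWG tells TCG the helper neither reads nor writes TCG
 * "globals" (guest registers mapped by the translator), so register
 * values may stay cached across the call; the _SE suffix additionally
 * marks a helper as side-effect free.
 */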
TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(cmpteq, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(cmptle, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(cmptlt, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(cmpgeq, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(cmpgle, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(cmpglt, TCG_CALL_NO_RWG, i64, env, i64, i64) + +DEF_HELPER_FLAGS_2(cvtts, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvtst, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvtqs, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvtqt, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvtqf, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvtgf, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvtgq, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvtqg, TCG_CALL_NO_RWG, i64, env, i64) + +DEF_HELPER_FLAGS_2(cvttq, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(cvttq_c, TCG_CALL_NO_RWG, i64, env, i64) + +DEF_HELPER_FLAGS_2(cvtql, TCG_CALL_NO_RWG, i64, env, i64) + +DEF_HELPER_FLAGS_2(setroundmode, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_2(setflushzero, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_3(fp_exc_raise, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_3(fp_exc_raise_s, TCG_CALL_NO_WG, void, env, i32, i32) + +DEF_HELPER_FLAGS_2(ieee_input, TCG_CALL_NO_WG, void, env, i64) +DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64) +DEF_HELPER_FLAGS_2(ieee_input_s, TCG_CALL_NO_WG, void, env, i64) + +#if !defined (CONFIG_USER_ONLY) +DEF_HELPER_2(hw_ret, void, env, i64) +DEF_HELPER_3(call_pal, void, env, i64, i64) + +DEF_HELPER_2(ldl_phys, i64, env, i64) +DEF_HELPER_2(ldq_phys, i64, env, i64) +DEF_HELPER_2(ldl_l_phys, i64, env, i64) +DEF_HELPER_2(ldq_l_phys, i64, env, i64) +DEF_HELPER_3(stl_phys, void, env, i64, i64) +DEF_HELPER_3(stq_phys, void, env, i64, i64) +DEF_HELPER_3(stl_c_phys, i64, env, i64, i64) +DEF_HELPER_3(stq_c_phys, i64, env, i64, i64) + +DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_1(tb_flush, TCG_CALL_NO_RWG, void, env) + +DEF_HELPER_1(halt, void, i64) + +DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64) +DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64) +DEF_HELPER_FLAGS_2(set_alarm, TCG_CALL_NO_RWG, void, env, i64) +#endif diff --git a/qemu/target-alpha/int_helper.c b/qemu/target-alpha/int_helper.c new file mode 100644 index 000000000..74f38cbe7 --- /dev/null +++ b/qemu/target-alpha/int_helper.c @@ -0,0 +1,251 @@ +/* + * Helpers for integer and multimedia instructions. + * + * Copyright (c) 2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" + + +uint64_t helper_ctpop(uint64_t arg) +{ + return ctpop64(arg); +} + +uint64_t helper_ctlz(uint64_t arg) +{ + return clz64(arg); +} + +uint64_t helper_cttz(uint64_t arg) +{ + return ctz64(arg); +} + +uint64_t helper_zapnot(uint64_t val, uint64_t mskb) +{ + uint64_t mask; + + mask = -(mskb & 0x01) & 0x00000000000000ffull; + mask |= -(mskb & 0x02) & 0x000000000000ff00ull; + mask |= -(mskb & 0x04) & 0x0000000000ff0000ull; + mask |= -(mskb & 0x08) & 0x00000000ff000000ull; + mask |= -(mskb & 0x10) & 0x000000ff00000000ull; + mask |= -(mskb & 0x20) & 0x0000ff0000000000ull; + mask |= -(mskb & 0x40) & 0x00ff000000000000ull; + mask |= -(mskb & 0x80) & 0xff00000000000000ull; + + return val & mask; +} + +uint64_t helper_zap(uint64_t val, uint64_t mask) +{ + return helper_zapnot(val, ~mask); +} + +uint64_t helper_cmpbge(uint64_t op1, uint64_t op2) +{ + uint8_t opa, opb, res; + int i; + + res = 0; + for (i = 0; i < 8; i++) { + opa = op1 >> (i * 8); + opb = op2 >> (i * 8); + if (opa >= opb) { + res |= 1 << i; + } + } + return res; +} + +uint64_t helper_minub8(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + uint8_t opa, opb, opr; + int i; + + for (i = 0; i < 8; ++i) { + opa = op1 >> (i * 8); + opb = op2 >> (i * 8); + opr = opa < opb ? opa : opb; + res |= (uint64_t)opr << (i * 8); + } + return res; +} + +uint64_t helper_minsb8(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + int8_t opa, opb; + uint8_t opr; + int i; + + for (i = 0; i < 8; ++i) { + opa = op1 >> (i * 8); + opb = op2 >> (i * 8); + opr = opa < opb ? opa : opb; + res |= (uint64_t)opr << (i * 8); + } + return res; +} + +uint64_t helper_minuw4(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + uint16_t opa, opb, opr; + int i; + + for (i = 0; i < 4; ++i) { + opa = op1 >> (i * 16); + opb = op2 >> (i * 16); + opr = opa < opb ? opa : opb; + res |= (uint64_t)opr << (i * 16); + } + return res; +} + +uint64_t helper_minsw4(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + int16_t opa, opb; + uint16_t opr; + int i; + + for (i = 0; i < 4; ++i) { + opa = op1 >> (i * 16); + opb = op2 >> (i * 16); + opr = opa < opb ? opa : opb; + res |= (uint64_t)opr << (i * 16); + } + return res; +} + +uint64_t helper_maxub8(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + uint8_t opa, opb, opr; + int i; + + for (i = 0; i < 8; ++i) { + opa = op1 >> (i * 8); + opb = op2 >> (i * 8); + opr = opa > opb ? opa : opb; + res |= (uint64_t)opr << (i * 8); + } + return res; +} + +uint64_t helper_maxsb8(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + int8_t opa, opb; + uint8_t opr; + int i; + + for (i = 0; i < 8; ++i) { + opa = op1 >> (i * 8); + opb = op2 >> (i * 8); + opr = opa > opb ? opa : opb; + res |= (uint64_t)opr << (i * 8); + } + return res; +} + +uint64_t helper_maxuw4(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + uint16_t opa, opb, opr; + int i; + + for (i = 0; i < 4; ++i) { + opa = op1 >> (i * 16); + opb = op2 >> (i * 16); + opr = opa > opb ? opa : opb; + res |= (uint64_t)opr << (i * 16); + } + return res; +} + +uint64_t helper_maxsw4(uint64_t op1, uint64_t op2) +{ + uint64_t res = 0; + int16_t opa, opb; + uint16_t opr; + int i; + + for (i = 0; i < 4; ++i) { + opa = op1 >> (i * 16); + opb = op2 >> (i * 16); + opr = opa > opb ? 
opa : opb;
+        res |= (uint64_t)opr << (i * 16);
+    }
+    return res;
+}
+
+uint64_t helper_perr(uint64_t op1, uint64_t op2)
+{
+    uint64_t res = 0;
+    uint8_t opa, opb, opr;
+    int i;
+
+    for (i = 0; i < 8; ++i) {
+        opa = op1 >> (i * 8);
+        opb = op2 >> (i * 8);
+        if (opa >= opb) {
+            opr = opa - opb;
+        } else {
+            opr = opb - opa;
+        }
+        res += opr;
+    }
+    return res;
+}
+
+uint64_t helper_pklb(uint64_t op1)
+{
+    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
+}
+
+uint64_t helper_pkwb(uint64_t op1)
+{
+    return ((op1 & 0xff)
+            | ((op1 >> 8) & 0xff00)
+            | ((op1 >> 16) & 0xff0000)
+            | ((op1 >> 24) & 0xff000000));
+}
+
+uint64_t helper_unpkbl(uint64_t op1)
+{
+    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
+}
+
+uint64_t helper_unpkbw(uint64_t op1)
+{
+    return ((op1 & 0xff)
+            | ((op1 & 0xff00) << 8)
+            | ((op1 & 0xff0000) << 16)
+            | ((op1 & 0xff000000) << 24));
+}
+
+void helper_check_overflow(CPUAlphaState *env, uint64_t op1, uint64_t op2)
+{
+    if (unlikely(op1 != op2)) {
+        arith_excp(env, GETPC(), EXC_M_IOV, 0);
+    }
+}
diff --git a/qemu/target-alpha/machine.c b/qemu/target-alpha/machine.c
new file mode 100644
index 000000000..e796bbe27
--- /dev/null
+++ b/qemu/target-alpha/machine.c
@@ -0,0 +1,89 @@
+#include "hw/hw.h"
+#include "hw/boards.h"
+
+static int get_fpcr(QEMUFile *f, void *opaque, size_t size)
+{
+    CPUAlphaState *env = opaque;
+    cpu_alpha_store_fpcr(env, qemu_get_be64(f));
+    return 0;
+}
+
+static void put_fpcr(QEMUFile *f, void *opaque, size_t size)
+{
+    CPUAlphaState *env = opaque;
+    qemu_put_be64(f, cpu_alpha_load_fpcr(env));
+}
+
+static const VMStateInfo vmstate_fpcr = {
+    .name = "fpcr",
+    .get = get_fpcr,
+    .put = put_fpcr,
+};
+
+static VMStateField vmstate_env_fields[] = {
+    VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31),
+    VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31),
+    /* Save the architecture value of the fpcr, not the internally
+       expanded version.  Since this architecture value does not
+       exist in memory to be stored, this requires a bit of hoop
+       jumping.  We want OFFSET=0 so that we effectively pass ENV
+       to the helper functions, and we need to fill in the name by
+       hand since there's no field of that name.  */
+    {
+        .name = "fpcr",
+        .version_id = 0,
+        .size = sizeof(uint64_t),
+        .info = &vmstate_fpcr,
+        .flags = VMS_SINGLE,
+        .offset = 0
+    },
+    VMSTATE_UINTTL(pc, CPUAlphaState),
+    VMSTATE_UINTTL(unique, CPUAlphaState),
+    VMSTATE_UINTTL(lock_addr, CPUAlphaState),
+    VMSTATE_UINTTL(lock_value, CPUAlphaState),
+    /* Note that lock_st_addr is not saved; it is a temporary
+       used during the execution of the st[lq]_c insns.
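+       It is dead once the insn completes, so there is no state for
+       migration to preserve.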
*/ + + VMSTATE_UINT8(ps, CPUAlphaState), + VMSTATE_UINT8(intr_flag, CPUAlphaState), + VMSTATE_UINT8(pal_mode, CPUAlphaState), + VMSTATE_UINT8(fen, CPUAlphaState), + + VMSTATE_UINT32(pcc_ofs, CPUAlphaState), + + VMSTATE_UINTTL(trap_arg0, CPUAlphaState), + VMSTATE_UINTTL(trap_arg1, CPUAlphaState), + VMSTATE_UINTTL(trap_arg2, CPUAlphaState), + + VMSTATE_UINTTL(exc_addr, CPUAlphaState), + VMSTATE_UINTTL(palbr, CPUAlphaState), + VMSTATE_UINTTL(ptbr, CPUAlphaState), + VMSTATE_UINTTL(vptptr, CPUAlphaState), + VMSTATE_UINTTL(sysval, CPUAlphaState), + VMSTATE_UINTTL(usp, CPUAlphaState), + + VMSTATE_UINTTL_ARRAY(shadow, CPUAlphaState, 8), + VMSTATE_UINTTL_ARRAY(scratch, CPUAlphaState, 24), + + VMSTATE_END_OF_LIST() +}; + +static const VMStateDescription vmstate_env = { + .name = "env", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_env_fields, +}; + +static VMStateField vmstate_cpu_fields[] = { + VMSTATE_CPU(), + VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState), + VMSTATE_END_OF_LIST() +}; + +const VMStateDescription vmstate_alpha_cpu = { + .name = "cpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_cpu_fields, +}; diff --git a/qemu/target-alpha/mem_helper.c b/qemu/target-alpha/mem_helper.c new file mode 100644 index 000000000..7b5e30ddb --- /dev/null +++ b/qemu/target-alpha/mem_helper.c @@ -0,0 +1,159 @@ +/* + * Helpers for loads and stores + * + * Copyright (c) 2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+
+/* Softmmu support */
+#ifndef CONFIG_USER_ONLY
+
+uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    return (int32_t)ldl_phys(cs->as, p);
+}
+
+uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    return ldq_phys(cs->as, p);
+}
+
+uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    env->lock_addr = p;
+    return env->lock_value = (int32_t)ldl_phys(cs->as, p);
+}
+
+uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    env->lock_addr = p;
+    return env->lock_value = ldq_phys(cs->as, p);
+}
+
+void helper_stl_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    stl_phys(cs->as, p, v);
+}
+
+void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    stq_phys(cs->as, p, v);
+}
+
+uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    uint64_t ret = 0;
+
+    if (p == env->lock_addr) {
+        int32_t old = ldl_phys(cs->as, p);
+        if (old == (int32_t)env->lock_value) {
+            stl_phys(cs->as, p, v);
+            ret = 1;
+        }
+    }
+    env->lock_addr = -1;
+
+    return ret;
+}
+
+uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
+{
+    CPUState *cs = CPU(alpha_env_get_cpu(env));
+    uint64_t ret = 0;
+
+    if (p == env->lock_addr) {
+        uint64_t old = ldq_phys(cs->as, p);
+        if (old == env->lock_value) {
+            stq_phys(cs->as, p, v);
+            ret = 1;
+        }
+    }
+    env->lock_addr = -1;
+
+    return ret;
+}
+
+void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                   int is_write, int is_user, uintptr_t retaddr)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+    uint64_t pc;
+    uint32_t insn;
+
+    if (retaddr) {
+        cpu_restore_state(cs, retaddr);
+    }
+
+    pc = env->pc;
+    insn = cpu_ldl_code(env, pc);
+
+    env->trap_arg0 = addr;
+    env->trap_arg1 = insn >> 26;            /* opcode */
+    env->trap_arg2 = (insn >> 21) & 31;     /* dest regno */
+    cs->exception_index = EXCP_UNALIGN;
+    env->error_code = 0;
+    cpu_loop_exit(cs);
+}
+
+void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+                                 bool is_write, bool is_exec, int unused,
+                                 unsigned size)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+    CPUAlphaState *env = &cpu->env;
+
+    env->trap_arg0 = addr;
+    env->trap_arg1 = is_write ? 1 : 0;
+    cs->exception_index = EXCP_MCHK;
+    env->error_code = 0;
+
+    /* ??? We should cpu_restore_state to the faulting insn, but this hook
+       does not have access to the retaddr value from the original helper.
+       It's all moot until the QEMU PALcode grows an MCHK handler.  */
+
+    cpu_loop_exit(cs);
+}
+
+/* Try to fill the TLB and return an exception on error.  If retaddr is
+   NULL, it means that the function was called in C code (i.e.
not + from generated code or from helper.c) */ +/* XXX: fix it to restore all registers */ +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, + int mmu_idx, uintptr_t retaddr) +{ + int ret; + + ret = alpha_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (unlikely(ret != 0)) { + if (retaddr) { + cpu_restore_state(cs, retaddr); + } + /* Exception index and error code are already set */ + cpu_loop_exit(cs); + } +} +#endif /* CONFIG_USER_ONLY */ diff --git a/qemu/target-alpha/sys_helper.c b/qemu/target-alpha/sys_helper.c new file mode 100644 index 000000000..1c59e108b --- /dev/null +++ b/qemu/target-alpha/sys_helper.c @@ -0,0 +1,111 @@ +/* + * Helpers for system instructions. + * + * Copyright (c) 2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "sysemu/sysemu.h" +#include "qemu/timer.h" + + +uint64_t helper_load_pcc(CPUAlphaState *env) +{ +#ifndef CONFIG_USER_ONLY + /* In system mode we have access to a decent high-resolution clock. + In order to make OS-level time accounting work with the RPCC, + present it with a well-timed clock fixed at 250MHz. */ + return (((uint64_t)env->pcc_ofs << 32) + | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2)); +#else + /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. Just pass through the host cpu + clock ticks. Also, don't bother taking PCC_OFS into account. 
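+   Guest code generally treats RPCC as a free-running counter and looks
+   only at deltas of its low 32 bits, so raw host ticks should be an
+   adequate stand-in here.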
*/ + return (uint32_t)cpu_get_real_ticks(); +#endif +} + +/* PALcode support special instructions */ +#ifndef CONFIG_USER_ONLY +void helper_hw_ret(CPUAlphaState *env, uint64_t a) +{ + env->pc = a & ~3; + env->intr_flag = 0; + env->lock_addr = -1; + if ((a & 1) == 0) { + env->pal_mode = 0; + swap_shadow_regs(env); + } +} + +void helper_call_pal(CPUAlphaState *env, uint64_t pc, uint64_t entry_ofs) +{ + int pal_mode = env->pal_mode; + env->exc_addr = pc | pal_mode; + env->pc = env->palbr + entry_ofs; + if (!pal_mode) { + env->pal_mode = 1; + swap_shadow_regs(env); + } +} + +void helper_tbia(CPUAlphaState *env) +{ + tlb_flush(CPU(alpha_env_get_cpu(env)), 1); +} + +void helper_tbis(CPUAlphaState *env, uint64_t p) +{ + tlb_flush_page(CPU(alpha_env_get_cpu(env)), p); +} + +void helper_tb_flush(CPUAlphaState *env) +{ + tb_flush(CPU(alpha_env_get_cpu(env))); +} + +void helper_halt(uint64_t restart) +{ + if (restart) { + qemu_system_reset_request(); + } else { + qemu_system_shutdown_request(); + } +} + +uint64_t helper_get_vmtime(void) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +} + +uint64_t helper_get_walltime(void) +{ + return qemu_clock_get_ns(rtc_clock); +} + +void helper_set_alarm(CPUAlphaState *env, uint64_t expire) +{ + AlphaCPU *cpu = alpha_env_get_cpu(env); + + if (expire) { + env->alarm_expire = expire; + timer_mod(cpu->alarm_timer, expire); + } else { + timer_del(cpu->alarm_timer); + } +} + +#endif /* CONFIG_USER_ONLY */ diff --git a/qemu/target-alpha/translate.c b/qemu/target-alpha/translate.c new file mode 100644 index 000000000..81d4ff827 --- /dev/null +++ b/qemu/target-alpha/translate.c @@ -0,0 +1,2961 @@ +/* + * Alpha emulation cpu translation for qemu. + * + * Copyright (c) 2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "disas/disas.h" +#include "qemu/host-utils.h" +#include "tcg-op.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "trace-tcg.h" + + +#undef ALPHA_DEBUG_DISAS +#define CONFIG_SOFTFLOAT_INLINE + +#ifdef ALPHA_DEBUG_DISAS +# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) +#else +# define LOG_DISAS(...) do { } while (0) +#endif + +typedef struct DisasContext DisasContext; +struct DisasContext { + struct TranslationBlock *tb; + uint64_t pc; + int mem_idx; + + /* Current rounding mode for this TB. */ + int tb_rm; + /* Current flush-to-zero setting for this TB. */ + int tb_ftz; + + /* implver value for this CPU. */ + int implver; + + /* Temporaries for $31 and $f31 as source and destination. */ + TCGv zero; + TCGv sink; + /* Temporary for immediate constants. */ + TCGv lit; + + bool singlestep_enabled; +}; + +/* Return values from translate_one, indicating the state of the TB. + Note that zero indicates that we are not exiting the TB. */ + +typedef enum { + NO_EXIT, + + /* We have emitted one or more goto_tb. 
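+      Each such exit has already stored the next PC and finished with
+      exit_tb.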
No fixup required. */ + EXIT_GOTO_TB, + + /* We are not using a goto_tb (for whatever reason), but have updated + the PC (for whatever reason), so there's no need to do it again on + exiting the TB. */ + EXIT_PC_UPDATED, + + /* We are exiting the TB, but have neither emitted a goto_tb, nor + updated the PC for the next instruction to be executed. */ + EXIT_PC_STALE, + + /* We are ending the TB with a noreturn function call, e.g. longjmp. + No following code will be executed. */ + EXIT_NORETURN, +} ExitStatus; + +/* global register indexes */ +static TCGv_ptr cpu_env; +static TCGv cpu_ir[31]; +static TCGv cpu_fir[31]; +static TCGv cpu_pc; +static TCGv cpu_lock_addr; +static TCGv cpu_lock_st_addr; +static TCGv cpu_lock_value; + +#include "exec/gen-icount.h" + +void alpha_translate_init(void) +{ +#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) } + + typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; + static const GlobalVar vars[] = { + DEF_VAR(pc), + DEF_VAR(lock_addr), + DEF_VAR(lock_st_addr), + DEF_VAR(lock_value), + }; + +#undef DEF_VAR + + /* Use the symbolic register names that match the disassembler. */ + static const char greg_names[31][4] = { + "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", + "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", + "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", + "t10", "t11", "ra", "t12", "at", "gp", "sp" + }; + static const char freg_names[31][4] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30" + }; + + static bool done_init = 0; + int i; + + if (done_init) { + return; + } + done_init = 1; + + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + + for (i = 0; i < 31; i++) { + cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUAlphaState, ir[i]), + greg_names[i]); + } + + for (i = 0; i < 31; i++) { + cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0, + offsetof(CPUAlphaState, fir[i]), + freg_names[i]); + } + + for (i = 0; i < ARRAY_SIZE(vars); ++i) { + const GlobalVar *v = &vars[i]; + *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name); + } +} + +static TCGv load_zero(DisasContext *ctx) +{ + if (TCGV_IS_UNUSED_I64(ctx->zero)) { + ctx->zero = tcg_const_i64(0); + } + return ctx->zero; +} + +static TCGv dest_sink(DisasContext *ctx) +{ + if (TCGV_IS_UNUSED_I64(ctx->sink)) { + ctx->sink = tcg_temp_new(); + } + return ctx->sink; +} + +static TCGv load_gpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_ir[reg]; + } else { + return load_zero(ctx); + } +} + +static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg, + uint8_t lit, bool islit) +{ + if (islit) { + ctx->lit = tcg_const_i64(lit); + return ctx->lit; + } else if (likely(reg < 31)) { + return cpu_ir[reg]; + } else { + return load_zero(ctx); + } +} + +static TCGv dest_gpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_ir[reg]; + } else { + return dest_sink(ctx); + } +} + +static TCGv load_fpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_fir[reg]; + } else { + return load_zero(ctx); + } +} + +static TCGv dest_fpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_fir[reg]; + } else { + return dest_sink(ctx); + } +} + +static void gen_excp_1(int exception, int error_code) +{ + TCGv_i32 tmp1, tmp2; + + tmp1 = tcg_const_i32(exception); + tmp2 = tcg_const_i32(error_code); + 
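+    /* The helper is noreturn and longjmps out at run time; the temp
+       frees below are translation-time bookkeeping only.  */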
gen_helper_excp(cpu_env, tmp1, tmp2); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp1); +} + +static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code) +{ + tcg_gen_movi_i64(cpu_pc, ctx->pc); + gen_excp_1(exception, error_code); + return EXIT_NORETURN; +} + +static inline ExitStatus gen_invalid(DisasContext *ctx) +{ + return gen_excp(ctx, EXCP_OPCDEC, 0); +} + +static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags) +{ + TCGv_i32 tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); + gen_helper_memory_to_f(t0, tmp32); + tcg_temp_free_i32(tmp32); +} + +static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags) +{ + TCGv tmp = tcg_temp_new(); + tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ); + gen_helper_memory_to_g(t0, tmp); + tcg_temp_free(tmp); +} + +static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags) +{ + TCGv_i32 tmp32 = tcg_temp_new_i32(); + tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); + gen_helper_memory_to_s(t0, tmp32); + tcg_temp_free_i32(tmp32); +} + +static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags) +{ + tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL); + tcg_gen_mov_i64(cpu_lock_addr, t1); + tcg_gen_mov_i64(cpu_lock_value, t0); +} + +static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags) +{ + tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ); + tcg_gen_mov_i64(cpu_lock_addr, t1); + tcg_gen_mov_i64(cpu_lock_value, t0); +} + +static inline void gen_load_mem(DisasContext *ctx, + void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, + int flags), + int ra, int rb, int32_t disp16, bool fp, + bool clear) +{ + TCGv tmp, addr, va; + + /* LDQ_U with ra $31 is UNOP. Other various loads are forms of + prefetches, which we can treat as nops. No worries about + missed exceptions here. */ + if (unlikely(ra == 31)) { + return; + } + + tmp = tcg_temp_new(); + addr = load_gpr(ctx, rb); + + if (disp16) { + tcg_gen_addi_i64(tmp, addr, disp16); + addr = tmp; + } + if (clear) { + tcg_gen_andi_i64(tmp, addr, ~0x7); + addr = tmp; + } + + va = (fp ? cpu_fir[ra] : cpu_ir[ra]); + tcg_gen_qemu_load(va, addr, ctx->mem_idx); + + tcg_temp_free(tmp); +} + +static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags) +{ + TCGv_i32 tmp32 = tcg_temp_new_i32(); + gen_helper_f_to_memory(tmp32, t0); + tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); + tcg_temp_free_i32(tmp32); +} + +static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags) +{ + TCGv tmp = tcg_temp_new(); + gen_helper_g_to_memory(tmp, t0); + tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ); + tcg_temp_free(tmp); +} + +static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags) +{ + TCGv_i32 tmp32 = tcg_temp_new_i32(); + gen_helper_s_to_memory(tmp32, t0); + tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); + tcg_temp_free_i32(tmp32); +} + +static inline void gen_store_mem(DisasContext *ctx, + void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, + int flags), + int ra, int rb, int32_t disp16, bool fp, + bool clear) +{ + TCGv tmp, addr, va; + + tmp = tcg_temp_new(); + addr = load_gpr(ctx, rb); + + if (disp16) { + tcg_gen_addi_i64(tmp, addr, disp16); + addr = tmp; + } + if (clear) { + tcg_gen_andi_i64(tmp, addr, ~0x7); + addr = tmp; + } + + va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra)); + tcg_gen_qemu_store(va, addr, ctx->mem_idx); + + tcg_temp_free(tmp); +} + +static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, + int32_t disp16, int quad) +{ + TCGv addr; + + if (ra == 31) { + /* ??? Don't bother storing anything. 
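+       A store-conditional is architecturally allowed to fail for
+       implementation-specific reasons, so dropping the store is legal.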
The user can't tell
+       the difference, since the zero register always reads zero.  */
+        return NO_EXIT;
+    }
+
+#if defined(CONFIG_USER_ONLY)
+    addr = cpu_lock_st_addr;
+#else
+    addr = tcg_temp_local_new();
+#endif
+
+    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+
+#if defined(CONFIG_USER_ONLY)
+    /* ??? This is handled via a complicated version of compare-and-swap
+       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
+       in TCG so that this isn't necessary.  */
+    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
+#else
+    /* ??? In system mode we are never multi-threaded, so CAS can be
+       implemented via a non-atomic load-compare-store sequence.  */
+    {
+        TCGLabel *lab_fail, *lab_done;
+        TCGv val;
+
+        lab_fail = gen_new_label();
+        lab_done = gen_new_label();
+        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
+
+        val = tcg_temp_new();
+        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
+        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
+
+        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
+                            quad ? MO_LEQ : MO_LEUL);
+        tcg_gen_movi_i64(cpu_ir[ra], 1);
+        tcg_gen_br(lab_done);
+
+        gen_set_label(lab_fail);
+        tcg_gen_movi_i64(cpu_ir[ra], 0);
+
+        gen_set_label(lab_done);
+        tcg_gen_movi_i64(cpu_lock_addr, -1);
+
+        tcg_temp_free(addr);
+        return NO_EXIT;
+    }
+#endif
+}
+
+static bool in_superpage(DisasContext *ctx, int64_t addr)
+{
+    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
+            && addr < 0
+            && ((addr >> 41) & 3) == 2
+            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
+}
+
+static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
+{
+    /* Suppress goto_tb in the case of single-stepping and IO.  */
+    if ((ctx->tb->cflags & CF_LAST_IO)
+        || ctx->singlestep_enabled || singlestep) {
+        return false;
+    }
+    /* If the destination is in the superpage, the page perms can't change.  */
+    if (in_superpage(ctx, dest)) {
+        return true;
+    }
+    /* Check for the dest on the same page as the start of the TB.  */
+    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
+}
+
+static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
+{
+    uint64_t dest = ctx->pc + (disp << 2);
+
+    if (ra != 31) {
+        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
+    }
+
+    /* Notice branch-to-next; used to initialize RA with the PC.
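+       Compilers emit a branch to the very next insn to materialize the
+       current PC (e.g. for GP setup), so fall through without ending
+       the TB.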
*/ + if (disp == 0) { + return 0; + } else if (use_goto_tb(ctx, dest)) { + tcg_gen_goto_tb(0); + tcg_gen_movi_i64(cpu_pc, dest); + tcg_gen_exit_tb((uintptr_t)ctx->tb); + return EXIT_GOTO_TB; + } else { + tcg_gen_movi_i64(cpu_pc, dest); + return EXIT_PC_UPDATED; + } +} + +static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond, + TCGv cmp, int32_t disp) +{ + uint64_t dest = ctx->pc + (disp << 2); + TCGLabel *lab_true = gen_new_label(); + + if (use_goto_tb(ctx, dest)) { + tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); + + tcg_gen_goto_tb(0); + tcg_gen_movi_i64(cpu_pc, ctx->pc); + tcg_gen_exit_tb((uintptr_t)ctx->tb); + + gen_set_label(lab_true); + tcg_gen_goto_tb(1); + tcg_gen_movi_i64(cpu_pc, dest); + tcg_gen_exit_tb((uintptr_t)ctx->tb + 1); + + return EXIT_GOTO_TB; + } else { + TCGv_i64 z = tcg_const_i64(0); + TCGv_i64 d = tcg_const_i64(dest); + TCGv_i64 p = tcg_const_i64(ctx->pc); + + tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p); + + tcg_temp_free_i64(z); + tcg_temp_free_i64(d); + tcg_temp_free_i64(p); + return EXIT_PC_UPDATED; + } +} + +static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra, + int32_t disp, int mask) +{ + TCGv cmp_tmp; + + if (mask) { + cmp_tmp = tcg_temp_new(); + tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1); + } else { + cmp_tmp = load_gpr(ctx, ra); + } + + return gen_bcond_internal(ctx, cond, cmp_tmp, disp); +} + +/* Fold -0.0 for comparison with COND. */ + +static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) +{ + uint64_t mzero = 1ull << 63; + + switch (cond) { + case TCG_COND_LE: + case TCG_COND_GT: + /* For <= or >, the -0.0 value directly compares the way we want. */ + tcg_gen_mov_i64(dest, src); + break; + + case TCG_COND_EQ: + case TCG_COND_NE: + /* For == or !=, we can simply mask off the sign bit and compare. */ + tcg_gen_andi_i64(dest, src, mzero - 1); + break; + + case TCG_COND_GE: + case TCG_COND_LT: + /* For >= or <, map -0.0 to +0.0 via comparison and mask. 
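+       setcond yields 1 for anything other than -0.0; negation turns
+       that into an all-ones mask, and the AND then passes every value
+       through unchanged except -0.0, which becomes +0.0.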
*/ + tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero); + tcg_gen_neg_i64(dest, dest); + tcg_gen_and_i64(dest, dest, src); + break; + + default: + abort(); + } +} + +static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, + int32_t disp) +{ + TCGv cmp_tmp = tcg_temp_new(); + gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); + return gen_bcond_internal(ctx, cond, cmp_tmp, disp); +} + +static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) +{ + TCGv_i64 va, vb, z; + + z = load_zero(ctx); + vb = load_fpr(ctx, rb); + va = tcg_temp_new(); + gen_fold_mzero(cond, va, load_fpr(ctx, ra)); + + tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); + + tcg_temp_free(va); +} + +#define QUAL_RM_N 0x080 /* Round mode nearest even */ +#define QUAL_RM_C 0x000 /* Round mode chopped */ +#define QUAL_RM_M 0x040 /* Round mode minus infinity */ +#define QUAL_RM_D 0x0c0 /* Round mode dynamic */ +#define QUAL_RM_MASK 0x0c0 + +#define QUAL_U 0x100 /* Underflow enable (fp output) */ +#define QUAL_V 0x100 /* Overflow enable (int output) */ +#define QUAL_S 0x400 /* Software completion enable */ +#define QUAL_I 0x200 /* Inexact detection enable */ + +static void gen_qual_roundmode(DisasContext *ctx, int fn11) +{ + TCGv_i32 tmp; + + fn11 &= QUAL_RM_MASK; + if (fn11 == ctx->tb_rm) { + return; + } + ctx->tb_rm = fn11; + + tmp = tcg_temp_new_i32(); + switch (fn11) { + case QUAL_RM_N: + tcg_gen_movi_i32(tmp, float_round_nearest_even); + break; + case QUAL_RM_C: + tcg_gen_movi_i32(tmp, float_round_to_zero); + break; + case QUAL_RM_M: + tcg_gen_movi_i32(tmp, float_round_down); + break; + case QUAL_RM_D: + tcg_gen_ld8u_i32(tmp, cpu_env, + offsetof(CPUAlphaState, fpcr_dyn_round)); + break; + } + +#if defined(CONFIG_SOFTFLOAT_INLINE) + /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode. + With CONFIG_SOFTFLOAT that expands to an out-of-line call that just + sets the one field. */ + tcg_gen_st8_i32(tmp, cpu_env, + offsetof(CPUAlphaState, fp_status.float_rounding_mode)); +#else + gen_helper_setroundmode(tmp); +#endif + + tcg_temp_free_i32(tmp); +} + +static void gen_qual_flushzero(DisasContext *ctx, int fn11) +{ + TCGv_i32 tmp; + + fn11 &= QUAL_U; + if (fn11 == ctx->tb_ftz) { + return; + } + ctx->tb_ftz = fn11; + + tmp = tcg_temp_new_i32(); + if (fn11) { + /* Underflow is enabled, use the FPCR setting. */ + tcg_gen_ld8u_i32(tmp, cpu_env, + offsetof(CPUAlphaState, fpcr_flush_to_zero)); + } else { + /* Underflow is disabled, force flush-to-zero. */ + tcg_gen_movi_i32(tmp, 1); + } + +#if defined(CONFIG_SOFTFLOAT_INLINE) + tcg_gen_st8_i32(tmp, cpu_env, + offsetof(CPUAlphaState, fp_status.flush_to_zero)); +#else + gen_helper_setflushzero(tmp); +#endif + + tcg_temp_free_i32(tmp); +} + +static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) +{ + TCGv val; + + if (unlikely(reg == 31)) { + val = load_zero(ctx); + } else { + val = cpu_fir[reg]; + if ((fn11 & QUAL_S) == 0) { + if (is_cmp) { + gen_helper_ieee_input_cmp(cpu_env, val); + } else { + gen_helper_ieee_input(cpu_env, val); + } + } else { +#ifndef CONFIG_USER_ONLY + /* In system mode, raise exceptions for denormals like real + hardware. In user mode, proceed as if the OS completion + handler is handling the denormal as per spec. */ + gen_helper_ieee_input_s(cpu_env, val); +#endif + } + } + return val; +} + +static void gen_fp_exc_raise(int rc, int fn11) +{ + /* ??? We ought to be able to do something with imprecise exceptions. + E.g. 
notice we're still in the trap shadow of something within the + TB and do not generate the code to signal the exception; end the TB + when an exception is forced to arrive, either by consumption of a + register value or TRAPB or EXCB. */ + TCGv_i32 reg, ign; + uint32_t ignore = 0; + + if (!(fn11 & QUAL_U)) { + /* Note that QUAL_U == QUAL_V, so ignore either. */ + ignore |= FPCR_UNF | FPCR_IOV; + } + if (!(fn11 & QUAL_I)) { + ignore |= FPCR_INE; + } + ign = tcg_const_i32(ignore); + + /* ??? Pass in the regno of the destination so that the helper can + set EXC_MASK, which contains a bitmask of destination registers + that have caused arithmetic traps. A simple userspace emulation + does not require this. We do need it for a guest kernel's entArith, + or if we were to do something clever with imprecise exceptions. */ + reg = tcg_const_i32(rc + 32); + if (fn11 & QUAL_S) { + gen_helper_fp_exc_raise_s(cpu_env, ign, reg); + } else { + gen_helper_fp_exc_raise(cpu_env, ign, reg); + } + + tcg_temp_free_i32(reg); + tcg_temp_free_i32(ign); +} + +static void gen_cvtlq(TCGv vc, TCGv vb) +{ + TCGv tmp = tcg_temp_new(); + + /* The arithmetic right shift here, plus the sign-extended mask below + yields a sign-extended result without an explicit ext32s_i64. */ + tcg_gen_sari_i64(tmp, vb, 32); + tcg_gen_shri_i64(vc, vb, 29); + tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000); + tcg_gen_andi_i64(vc, vc, 0x3fffffff); + tcg_gen_or_i64(vc, vc, tmp); + + tcg_temp_free(tmp); +} + +static void gen_ieee_arith2(DisasContext *ctx, + void (*helper)(TCGv, TCGv_ptr, TCGv), + int rb, int rc, int fn11) +{ + TCGv vb; + + gen_qual_roundmode(ctx, fn11); + gen_qual_flushzero(ctx, fn11); + + vb = gen_ieee_input(ctx, rb, fn11, 0); + helper(dest_fpr(ctx, rc), cpu_env, vb); + + gen_fp_exc_raise(rc, fn11); +} + +#define IEEE_ARITH2(name) \ +static inline void glue(gen_, name)(DisasContext *ctx, \ + int rb, int rc, int fn11) \ +{ \ + gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \ +} +IEEE_ARITH2(sqrts) +IEEE_ARITH2(sqrtt) +IEEE_ARITH2(cvtst) +IEEE_ARITH2(cvtts) + +static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11) +{ + TCGv vb, vc; + + /* No need to set flushzero, since we have an integer output. */ + vb = gen_ieee_input(ctx, rb, fn11, 0); + vc = dest_fpr(ctx, rc); + + /* Almost all integer conversions use cropped rounding; + special case that. */ + if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) { + gen_helper_cvttq_c(vc, cpu_env, vb); + } else { + gen_qual_roundmode(ctx, fn11); + gen_helper_cvttq(vc, cpu_env, vb); + } + gen_fp_exc_raise(rc, fn11); +} + +static void gen_ieee_intcvt(DisasContext *ctx, + void (*helper)(TCGv, TCGv_ptr, TCGv), + int rb, int rc, int fn11) +{ + TCGv vb, vc; + + gen_qual_roundmode(ctx, fn11); + vb = load_fpr(ctx, rb); + vc = dest_fpr(ctx, rc); + + /* The only exception that can be raised by integer conversion + is inexact. Thus we only need to worry about exceptions when + inexact handling is requested. 
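+       When /I is absent we still perform the conversion but can skip
+       the exception-raise bookkeeping.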
*/ + if (fn11 & QUAL_I) { + helper(vc, cpu_env, vb); + gen_fp_exc_raise(rc, fn11); + } else { + helper(vc, cpu_env, vb); + } +} + +#define IEEE_INTCVT(name) \ +static inline void glue(gen_, name)(DisasContext *ctx, \ + int rb, int rc, int fn11) \ +{ \ + gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \ +} +IEEE_INTCVT(cvtqs) +IEEE_INTCVT(cvtqt) + +static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) +{ + TCGv vmask = tcg_const_i64(mask); + TCGv tmp = tcg_temp_new_i64(); + + if (inv_a) { + tcg_gen_andc_i64(tmp, vmask, va); + } else { + tcg_gen_and_i64(tmp, va, vmask); + } + + tcg_gen_andc_i64(vc, vb, vmask); + tcg_gen_or_i64(vc, vc, tmp); + + tcg_temp_free(vmask); + tcg_temp_free(tmp); +} + +static void gen_ieee_arith3(DisasContext *ctx, + void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), + int ra, int rb, int rc, int fn11) +{ + TCGv va, vb, vc; + + gen_qual_roundmode(ctx, fn11); + gen_qual_flushzero(ctx, fn11); + + va = gen_ieee_input(ctx, ra, fn11, 0); + vb = gen_ieee_input(ctx, rb, fn11, 0); + vc = dest_fpr(ctx, rc); + helper(vc, cpu_env, va, vb); + + gen_fp_exc_raise(rc, fn11); +} + +#define IEEE_ARITH3(name) \ +static inline void glue(gen_, name)(DisasContext *ctx, \ + int ra, int rb, int rc, int fn11) \ +{ \ + gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \ +} +IEEE_ARITH3(adds) +IEEE_ARITH3(subs) +IEEE_ARITH3(muls) +IEEE_ARITH3(divs) +IEEE_ARITH3(addt) +IEEE_ARITH3(subt) +IEEE_ARITH3(mult) +IEEE_ARITH3(divt) + +static void gen_ieee_compare(DisasContext *ctx, + void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), + int ra, int rb, int rc, int fn11) +{ + TCGv va, vb, vc; + + va = gen_ieee_input(ctx, ra, fn11, 1); + vb = gen_ieee_input(ctx, rb, fn11, 1); + vc = dest_fpr(ctx, rc); + helper(vc, cpu_env, va, vb); + + gen_fp_exc_raise(rc, fn11); +} + +#define IEEE_CMP3(name) \ +static inline void glue(gen_, name)(DisasContext *ctx, \ + int ra, int rb, int rc, int fn11) \ +{ \ + gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \ +} +IEEE_CMP3(cmptun) +IEEE_CMP3(cmpteq) +IEEE_CMP3(cmptlt) +IEEE_CMP3(cmptle) + +static inline uint64_t zapnot_mask(uint8_t lit) +{ + uint64_t mask = 0; + int i; + + for (i = 0; i < 8; ++i) { + if ((lit >> i) & 1) { + mask |= 0xffull << (i * 8); + } + } + return mask; +} + +/* Implement zapnot with an immediate operand, which expands to some + form of immediate AND. This is a basic building block in the + definition of many of the other byte manipulation instructions. 
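+   For example, a byte mask of 0x0f expands to 0x00000000ffffffff,
+   which is why it reduces to a simple ext32u below.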
*/ +static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit) +{ + switch (lit) { + case 0x00: + tcg_gen_movi_i64(dest, 0); + break; + case 0x01: + tcg_gen_ext8u_i64(dest, src); + break; + case 0x03: + tcg_gen_ext16u_i64(dest, src); + break; + case 0x0f: + tcg_gen_ext32u_i64(dest, src); + break; + case 0xff: + tcg_gen_mov_i64(dest, src); + break; + default: + tcg_gen_andi_i64(dest, src, zapnot_mask(lit)); + break; + } +} + +/* EXTWH, EXTLH, EXTQH */ +static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, + uint8_t lit, uint8_t byte_mask) +{ + if (islit) { + tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f); + } else { + TCGv tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3); + tcg_gen_neg_i64(tmp, tmp); + tcg_gen_andi_i64(tmp, tmp, 0x3f); + tcg_gen_shl_i64(vc, va, tmp); + tcg_temp_free(tmp); + } + gen_zapnoti(vc, vc, byte_mask); +} + +/* EXTBL, EXTWL, EXTLL, EXTQL */ +static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, + uint8_t lit, uint8_t byte_mask) +{ + if (islit) { + tcg_gen_shri_i64(vc, va, (lit & 7) * 8); + } else { + TCGv tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); + tcg_gen_shli_i64(tmp, tmp, 3); + tcg_gen_shr_i64(vc, va, tmp); + tcg_temp_free(tmp); + } + gen_zapnoti(vc, vc, byte_mask); +} + +/* INSWH, INSLH, INSQH */ +static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, + uint8_t lit, uint8_t byte_mask) +{ + TCGv tmp = tcg_temp_new(); + + /* The instruction description has us left-shift the byte mask and extract + bits <15:8> and apply that zap at the end. This is equivalent to simply + performing the zap first and shifting afterward. */ + gen_zapnoti(tmp, va, byte_mask); + + if (islit) { + lit &= 7; + if (unlikely(lit == 0)) { + tcg_gen_movi_i64(vc, 0); + } else { + tcg_gen_shri_i64(vc, tmp, 64 - lit * 8); + } + } else { + TCGv shift = tcg_temp_new(); + + /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this + portably by splitting the shift into two parts: shift_count-1 and 1. + Arrange for the -1 by using ones-complement instead of + twos-complement in the negation: ~(B * 8) & 63. */ + + tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); + tcg_gen_not_i64(shift, shift); + tcg_gen_andi_i64(shift, shift, 0x3f); + + tcg_gen_shr_i64(vc, tmp, shift); + tcg_gen_shri_i64(vc, vc, 1); + tcg_temp_free(shift); + } + tcg_temp_free(tmp); +} + +/* INSBL, INSWL, INSLL, INSQL */ +static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, + uint8_t lit, uint8_t byte_mask) +{ + TCGv tmp = tcg_temp_new(); + + /* The instruction description has us left-shift the byte mask + the same number of byte slots as the data and apply the zap + at the end. This is equivalent to simply performing the zap + first and shifting afterward. */ + gen_zapnoti(tmp, va, byte_mask); + + if (islit) { + tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8); + } else { + TCGv shift = tcg_temp_new(); + tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); + tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_shl_i64(vc, tmp, shift); + tcg_temp_free(shift); + } + tcg_temp_free(tmp); +} + +/* MSKWH, MSKLH, MSKQH */ +static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, + uint8_t lit, uint8_t byte_mask) +{ + if (islit) { + gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8)); + } else { + TCGv shift = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + + /* The instruction description is as above, where the byte_mask + is shifted left, and then we extract bits <15:8>. 
This can be + emulated with a right-shift on the expanded byte mask. This + requires extra care because for an input <2:0> == 0 we need a + shift of 64 bits in order to generate a zero. This is done by + splitting the shift into two parts, the variable shift - 1 + followed by a constant 1 shift. The code we expand below is + equivalent to ~(B * 8) & 63. */ + + tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); + tcg_gen_not_i64(shift, shift); + tcg_gen_andi_i64(shift, shift, 0x3f); + tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); + tcg_gen_shr_i64(mask, mask, shift); + tcg_gen_shri_i64(mask, mask, 1); + + tcg_gen_andc_i64(vc, va, mask); + + tcg_temp_free(mask); + tcg_temp_free(shift); + } +} + +/* MSKBL, MSKWL, MSKLL, MSKQL */ +static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, + uint8_t lit, uint8_t byte_mask) +{ + if (islit) { + gen_zapnoti(vc, va, ~(byte_mask << (lit & 7))); + } else { + TCGv shift = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + + tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); + tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); + tcg_gen_shl_i64(mask, mask, shift); + + tcg_gen_andc_i64(vc, va, mask); + + tcg_temp_free(mask); + tcg_temp_free(shift); + } +} + +static void gen_rx(int ra, int set) +{ + TCGv_i32 tmp; + + if (ra != 31) { + tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag)); + } + + tmp = tcg_const_i32(set); + tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag)); + tcg_temp_free_i32(tmp); +} + +static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) +{ + /* We're emulating OSF/1 PALcode. Many of these are trivial access + to internal cpu registers. */ + + /* Unprivileged PAL call */ + if (palcode >= 0x80 && palcode < 0xC0) { + switch (palcode) { + case 0x86: + /* IMB */ + /* No-op inside QEMU. */ + break; + case 0x9E: + /* RDUNIQUE */ + tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, unique)); + break; + case 0x9F: + /* WRUNIQUE */ + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, unique)); + break; + default: + palcode &= 0xbf; + goto do_call_pal; + } + return NO_EXIT; + } + +#ifndef CONFIG_USER_ONLY + /* Privileged PAL code */ + if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) { + switch (palcode) { + case 0x01: + /* CFLUSH */ + /* No-op inside QEMU. */ + break; + case 0x02: + /* DRAINA */ + /* No-op inside QEMU. */ + break; + case 0x2D: + /* WRVPTPTR */ + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, vptptr)); + break; + case 0x31: + /* WRVAL */ + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, sysval)); + break; + case 0x32: + /* RDVAL */ + tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, sysval)); + break; + + case 0x35: { + /* SWPIPL */ + TCGv tmp; + + /* Note that we already know we're in kernel mode, so we know + that PS only contains the 3 IPL bits. */ + tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, ps)); + + /* But make sure and store only the 3 IPL bits from the user. 
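+               The AND with PS_INT_MASK below discards everything
+               outside IPL<2:0>.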
*/ + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK); + tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps)); + tcg_temp_free(tmp); + break; + } + + case 0x36: + /* RDPS */ + tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, ps)); + break; + case 0x38: + /* WRUSP */ + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, usp)); + break; + case 0x3A: + /* RDUSP */ + tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, usp)); + break; + case 0x3C: + /* WHAMI */ + tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env, + -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); + break; + + default: + palcode &= 0x3f; + goto do_call_pal; + } + return NO_EXIT; + } +#endif + return gen_invalid(ctx); + + do_call_pal: +#ifdef CONFIG_USER_ONLY + return gen_excp(ctx, EXCP_CALL_PAL, palcode); +#else + { + TCGv pc = tcg_const_i64(ctx->pc); + TCGv entry = tcg_const_i64(palcode & 0x80 + ? 0x2000 + (palcode - 0x80) * 64 + : 0x1000 + palcode * 64); + + gen_helper_call_pal(cpu_env, pc, entry); + + tcg_temp_free(entry); + tcg_temp_free(pc); + + /* Since the destination is running in PALmode, we don't really + need the page permissions check. We'll see the existence of + the page when we create the TB, and we'll flush all TBs if + we change the PAL base register. */ + if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) { + tcg_gen_goto_tb(0); + tcg_gen_exit_tb((uintptr_t)ctx->tb); + return EXIT_GOTO_TB; + } + + return EXIT_PC_UPDATED; + } +#endif +} + +#ifndef CONFIG_USER_ONLY + +#define PR_BYTE 0x100000 +#define PR_LONG 0x200000 + +static int cpu_pr_data(int pr) +{ + switch (pr) { + case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE; + case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE; + case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; + case 3: return offsetof(CPUAlphaState, trap_arg0); + case 4: return offsetof(CPUAlphaState, trap_arg1); + case 5: return offsetof(CPUAlphaState, trap_arg2); + case 6: return offsetof(CPUAlphaState, exc_addr); + case 7: return offsetof(CPUAlphaState, palbr); + case 8: return offsetof(CPUAlphaState, ptbr); + case 9: return offsetof(CPUAlphaState, vptptr); + case 10: return offsetof(CPUAlphaState, unique); + case 11: return offsetof(CPUAlphaState, sysval); + case 12: return offsetof(CPUAlphaState, usp); + + case 32 ... 39: + return offsetof(CPUAlphaState, shadow[pr - 32]); + case 40 ... 63: + return offsetof(CPUAlphaState, scratch[pr - 40]); + + case 251: + return offsetof(CPUAlphaState, alarm_expire); + } + return 0; +} + +static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno) +{ + int data = cpu_pr_data(regno); + + /* Special help for VMTIME and WALLTIME. */ + if (regno == 250 || regno == 249) { + void (*helper)(TCGv) = gen_helper_get_walltime; + if (regno == 249) { + helper = gen_helper_get_vmtime; + } + if (ctx->tb->cflags & CF_USE_ICOUNT) { + gen_io_start(); + helper(va); + gen_io_end(); + return EXIT_PC_STALE; + } else { + helper(va); + return NO_EXIT; + } + } + + /* The basic registers are data only, and unknown registers + are read-zero, write-ignore. 
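+       cpu_pr_data() returns 0 for an unknown PR, which we turn into a
+       constant zero result here.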
*/ + if (data == 0) { + tcg_gen_movi_i64(va, 0); + } else if (data & PR_BYTE) { + tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE); + } else if (data & PR_LONG) { + tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG); + } else { + tcg_gen_ld_i64(va, cpu_env, data); + } + return NO_EXIT; +} + +static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno) +{ + TCGv tmp; + int data; + + switch (regno) { + case 255: + /* TBIA */ + gen_helper_tbia(cpu_env); + break; + + case 254: + /* TBIS */ + gen_helper_tbis(cpu_env, vb); + break; + + case 253: + /* WAIT */ + tmp = tcg_const_i64(1); + tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) + + offsetof(CPUState, halted)); + return gen_excp(ctx, EXCP_HLT, 0); + + case 252: + /* HALT */ + gen_helper_halt(vb); + return EXIT_PC_STALE; + + case 251: + /* ALARM */ + gen_helper_set_alarm(cpu_env, vb); + break; + + case 7: + /* PALBR */ + tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr)); + /* Changing the PAL base register implies un-chaining all of the TBs + that ended with a CALL_PAL. Since the base register usually only + changes during boot, flushing everything works well. */ + gen_helper_tb_flush(cpu_env); + return EXIT_PC_STALE; + + default: + /* The basic registers are data only, and unknown registers + are read-zero, write-ignore. */ + data = cpu_pr_data(regno); + if (data != 0) { + if (data & PR_BYTE) { + tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE); + } else if (data & PR_LONG) { + tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG); + } else { + tcg_gen_st_i64(vb, cpu_env, data); + } + } + break; + } + + return NO_EXIT; +} +#endif /* !USER_ONLY*/ + +#define REQUIRE_NO_LIT \ + do { \ + if (real_islit) { \ + goto invalid_opc; \ + } \ + } while (0) + +#define REQUIRE_TB_FLAG(FLAG) \ + do { \ + if ((ctx->tb->flags & (FLAG)) == 0) { \ + goto invalid_opc; \ + } \ + } while (0) + +#define REQUIRE_REG_31(WHICH) \ + do { \ + if (WHICH != 31) { \ + goto invalid_opc; \ + } \ + } while (0) + +static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) +{ + int32_t disp21, disp16, disp12 __attribute__((unused)); + uint16_t fn11; + uint8_t opc, ra, rb, rc, fpfn, fn7, lit; + bool islit, real_islit; + TCGv va, vb, vc, tmp, tmp2; + TCGv_i32 t32; + ExitStatus ret; + + /* Decode all instruction fields */ + opc = extract32(insn, 26, 6); + ra = extract32(insn, 21, 5); + rb = extract32(insn, 16, 5); + rc = extract32(insn, 0, 5); + real_islit = islit = extract32(insn, 12, 1); + lit = extract32(insn, 13, 8); + + disp21 = sextract32(insn, 0, 21); + disp16 = sextract32(insn, 0, 16); + disp12 = sextract32(insn, 0, 12); + + fn11 = extract32(insn, 5, 11); + fpfn = extract32(insn, 5, 6); + fn7 = extract32(insn, 5, 7); + + if (rb == 31 && !islit) { + islit = true; + lit = 0; + } + + ret = NO_EXIT; + switch (opc) { + case 0x00: + /* CALL_PAL */ + ret = gen_call_pal(ctx, insn & 0x03ffffff); + break; + case 0x01: + /* OPC01 */ + goto invalid_opc; + case 0x02: + /* OPC02 */ + goto invalid_opc; + case 0x03: + /* OPC03 */ + goto invalid_opc; + case 0x04: + /* OPC04 */ + goto invalid_opc; + case 0x05: + /* OPC05 */ + goto invalid_opc; + case 0x06: + /* OPC06 */ + goto invalid_opc; + case 0x07: + /* OPC07 */ + goto invalid_opc; + + case 0x09: + /* LDAH */ + disp16 = (uint32_t)disp16 << 16; + /* fall through */ + case 0x08: + /* LDA */ + va = dest_gpr(ctx, ra); + /* It's worth special-casing immediate loads. 
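+       With rb == $31 the base register reads as zero, so LDA/LDAH
+       reduce to loading the (possibly pre-shifted) displacement
+       directly.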
*/ + if (rb == 31) { + tcg_gen_movi_i64(va, disp16); + } else { + tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16); + } + break; + + case 0x0A: + /* LDBU */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); + break; + case 0x0B: + /* LDQ_U */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); + break; + case 0x0C: + /* LDWU */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); + break; + case 0x0D: + /* STW */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); + break; + case 0x0E: + /* STB */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); + break; + case 0x0F: + /* STQ_U */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1); + break; + + case 0x10: + vc = dest_gpr(ctx, rc); + vb = load_gpr_lit(ctx, rb, lit, islit); + + if (ra == 31) { + if (fn7 == 0x00) { + /* Special case ADDL as SEXTL. */ + tcg_gen_ext32s_i64(vc, vb); + break; + } + if (fn7 == 0x29) { + /* Special case SUBQ as NEGQ. */ + tcg_gen_neg_i64(vc, vb); + break; + } + } + + va = load_gpr(ctx, ra); + switch (fn7) { + case 0x00: + /* ADDL */ + tcg_gen_add_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); + break; + case 0x02: + /* S4ADDL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_add_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + case 0x09: + /* SUBL */ + tcg_gen_sub_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); + break; + case 0x0B: + /* S4SUBL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_sub_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + case 0x0F: + /* CMPBGE */ + gen_helper_cmpbge(vc, va, vb); + break; + case 0x12: + /* S8ADDL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_add_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + case 0x1B: + /* S8SUBL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_sub_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + case 0x1D: + /* CMPULT */ + tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); + break; + case 0x20: + /* ADDQ */ + tcg_gen_add_i64(vc, va, vb); + break; + case 0x22: + /* S4ADDQ */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_add_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x29: + /* SUBQ */ + tcg_gen_sub_i64(vc, va, vb); + break; + case 0x2B: + /* S4SUBQ */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_sub_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x2D: + /* CMPEQ */ + tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); + break; + case 0x32: + /* S8ADDQ */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_add_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x3B: + /* S8SUBQ */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_sub_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x3D: + /* CMPULE */ + tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); + break; + case 0x40: + /* ADDL/V */ + tmp = tcg_temp_new(); + tcg_gen_ext32s_i64(tmp, va); + tcg_gen_ext32s_i64(vc, vb); + tcg_gen_add_i64(tmp, tmp, vc); + tcg_gen_ext32s_i64(vc, tmp); + gen_helper_check_overflow(cpu_env, vc, tmp); + tcg_temp_free(tmp); + break; + case 0x49: + /* SUBL/V */ + tmp = tcg_temp_new(); + tcg_gen_ext32s_i64(tmp, va); + tcg_gen_ext32s_i64(vc, vb); + 
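+            /* The 64-bit difference of the sign-extended inputs differs
+               from its own 32-bit sign-extension exactly when the
+               subtraction overflowed.  */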
tcg_gen_sub_i64(tmp, tmp, vc); + tcg_gen_ext32s_i64(vc, tmp); + gen_helper_check_overflow(cpu_env, vc, tmp); + tcg_temp_free(tmp); + break; + case 0x4D: + /* CMPLT */ + tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); + break; + case 0x60: + /* ADDQ/V */ + tmp = tcg_temp_new(); + tmp2 = tcg_temp_new(); + tcg_gen_eqv_i64(tmp, va, vb); + tcg_gen_mov_i64(tmp2, va); + tcg_gen_add_i64(vc, va, vb); + tcg_gen_xor_i64(tmp2, tmp2, vc); + tcg_gen_and_i64(tmp, tmp, tmp2); + tcg_gen_shri_i64(tmp, tmp, 63); + tcg_gen_movi_i64(tmp2, 0); + gen_helper_check_overflow(cpu_env, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x69: + /* SUBQ/V */ + tmp = tcg_temp_new(); + tmp2 = tcg_temp_new(); + tcg_gen_xor_i64(tmp, va, vb); + tcg_gen_mov_i64(tmp2, va); + tcg_gen_sub_i64(vc, va, vb); + tcg_gen_xor_i64(tmp2, tmp2, vc); + tcg_gen_and_i64(tmp, tmp, tmp2); + tcg_gen_shri_i64(tmp, tmp, 63); + tcg_gen_movi_i64(tmp2, 0); + gen_helper_check_overflow(cpu_env, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x6D: + /* CMPLE */ + tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); + break; + default: + goto invalid_opc; + } + break; + + case 0x11: + if (fn7 == 0x20) { + if (rc == 31) { + /* Special case BIS as NOP. */ + break; + } + if (ra == 31) { + /* Special case BIS as MOV. */ + vc = dest_gpr(ctx, rc); + if (islit) { + tcg_gen_movi_i64(vc, lit); + } else { + tcg_gen_mov_i64(vc, load_gpr(ctx, rb)); + } + break; + } + } + + vc = dest_gpr(ctx, rc); + vb = load_gpr_lit(ctx, rb, lit, islit); + + if (fn7 == 0x28 && ra == 31) { + /* Special case ORNOT as NOT. */ + tcg_gen_not_i64(vc, vb); + break; + } + + va = load_gpr(ctx, ra); + switch (fn7) { + case 0x00: + /* AND */ + tcg_gen_and_i64(vc, va, vb); + break; + case 0x08: + /* BIC */ + tcg_gen_andc_i64(vc, va, vb); + break; + case 0x14: + /* CMOVLBS */ + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, va, 1); + tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), + vb, load_gpr(ctx, rc)); + tcg_temp_free(tmp); + break; + case 0x16: + /* CMOVLBC */ + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, va, 1); + tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), + vb, load_gpr(ctx, rc)); + tcg_temp_free(tmp); + break; + case 0x20: + /* BIS */ + tcg_gen_or_i64(vc, va, vb); + break; + case 0x24: + /* CMOVEQ */ + tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); + break; + case 0x26: + /* CMOVNE */ + tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); + break; + case 0x28: + /* ORNOT */ + tcg_gen_orc_i64(vc, va, vb); + break; + case 0x40: + /* XOR */ + tcg_gen_xor_i64(vc, va, vb); + break; + case 0x44: + /* CMOVLT */ + tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); + break; + case 0x46: + /* CMOVGE */ + tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); + break; + case 0x48: + /* EQV */ + tcg_gen_eqv_i64(vc, va, vb); + break; + case 0x61: + /* AMASK */ + REQUIRE_REG_31(ra); + { + uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT; + tcg_gen_andi_i64(vc, vb, ~amask); + } + break; + case 0x64: + /* CMOVLE */ + tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); + break; + case 0x66: + /* CMOVGT */ + tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); + break; + case 0x6C: + /* IMPLVER */ + REQUIRE_REG_31(ra); + tcg_gen_movi_i64(vc, ctx->implver); + break; + default: + goto invalid_opc; + } + break; + + case 0x12: + vc = dest_gpr(ctx, rc); 
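+        /* Opcode 0x12 is the byte-manipulation group (MSK*, EXT*, INS*,
+           ZAP/ZAPNOT, SLL/SRL/SRA).  Every form reads ra and writes rc;
+           rb or an 8-bit literal supplies the shift count or byte offset,
+           e.g. EXTBL with a literal of 3 yields (ra >> 24) & 0xff.  */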
+ va = load_gpr(ctx, ra); + switch (fn7) { + case 0x02: + /* MSKBL */ + gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01); + break; + case 0x06: + /* EXTBL */ + gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01); + break; + case 0x0B: + /* INSBL */ + gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01); + break; + case 0x12: + /* MSKWL */ + gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03); + break; + case 0x16: + /* EXTWL */ + gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03); + break; + case 0x1B: + /* INSWL */ + gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03); + break; + case 0x22: + /* MSKLL */ + gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f); + break; + case 0x26: + /* EXTLL */ + gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f); + break; + case 0x2B: + /* INSLL */ + gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f); + break; + case 0x30: + /* ZAP */ + if (islit) { + gen_zapnoti(vc, va, ~lit); + } else { + gen_helper_zap(vc, va, load_gpr(ctx, rb)); + } + break; + case 0x31: + /* ZAPNOT */ + if (islit) { + gen_zapnoti(vc, va, lit); + } else { + gen_helper_zapnot(vc, va, load_gpr(ctx, rb)); + } + break; + case 0x32: + /* MSKQL */ + gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff); + break; + case 0x34: + /* SRL */ + if (islit) { + tcg_gen_shri_i64(vc, va, lit & 0x3f); + } else { + tmp = tcg_temp_new(); + vb = load_gpr(ctx, rb); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_shr_i64(vc, va, tmp); + tcg_temp_free(tmp); + } + break; + case 0x36: + /* EXTQL */ + gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff); + break; + case 0x39: + /* SLL */ + if (islit) { + tcg_gen_shli_i64(vc, va, lit & 0x3f); + } else { + tmp = tcg_temp_new(); + vb = load_gpr(ctx, rb); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_shl_i64(vc, va, tmp); + tcg_temp_free(tmp); + } + break; + case 0x3B: + /* INSQL */ + gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff); + break; + case 0x3C: + /* SRA */ + if (islit) { + tcg_gen_sari_i64(vc, va, lit & 0x3f); + } else { + tmp = tcg_temp_new(); + vb = load_gpr(ctx, rb); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_sar_i64(vc, va, tmp); + tcg_temp_free(tmp); + } + break; + case 0x52: + /* MSKWH */ + gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03); + break; + case 0x57: + /* INSWH */ + gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03); + break; + case 0x5A: + /* EXTWH */ + gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03); + break; + case 0x62: + /* MSKLH */ + gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f); + break; + case 0x67: + /* INSLH */ + gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f); + break; + case 0x6A: + /* EXTLH */ + gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f); + break; + case 0x72: + /* MSKQH */ + gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff); + break; + case 0x77: + /* INSQH */ + gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff); + break; + case 0x7A: + /* EXTQH */ + gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff); + break; + default: + goto invalid_opc; + } + break; + + case 0x13: + vc = dest_gpr(ctx, rc); + vb = load_gpr_lit(ctx, rb, lit, islit); + va = load_gpr(ctx, ra); + switch (fn7) { + case 0x00: + /* MULL */ + tcg_gen_mul_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); + break; + case 0x20: + /* MULQ */ + tcg_gen_mul_i64(vc, va, vb); + break; + case 0x30: + /* UMULH */ + tmp = tcg_temp_new(); + tcg_gen_mulu2_i64(tmp, vc, va, vb); + tcg_temp_free(tmp); + break; + case 0x40: + /* MULL/V */ + tmp = tcg_temp_new(); + tcg_gen_ext32s_i64(tmp, va); + tcg_gen_ext32s_i64(vc, vb); + tcg_gen_mul_i64(tmp, tmp, vc); + tcg_gen_ext32s_i64(vc, tmp); + gen_helper_check_overflow(cpu_env, vc, tmp); + tcg_temp_free(tmp); + break; + case 0x60: + /* MULQ/V */ + 
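+            /* Signed 64x64->128-bit multiply: muls2 leaves the low half of
+               the product in vc and the high half in tmp.  The result fits
+               in 64 bits exactly when the high half equals the sign
+               extension of the low half (vc >> 63), which check_overflow
+               verifies.  */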
+            tmp = tcg_temp_new();
+            tmp2 = tcg_temp_new();
+            tcg_gen_muls2_i64(vc, tmp, va, vb);
+            tcg_gen_sari_i64(tmp2, vc, 63);
+            gen_helper_check_overflow(cpu_env, tmp, tmp2);
+            tcg_temp_free(tmp);
+            tcg_temp_free(tmp2);
+            break;
+        default:
+            goto invalid_opc;
+        }
+        break;
+
+    case 0x14:
+        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+        vc = dest_fpr(ctx, rc);
+        switch (fpfn) { /* fn11 & 0x3F */
+        case 0x04:
+            /* ITOFS */
+            REQUIRE_REG_31(rb);
+            t32 = tcg_temp_new_i32();
+            va = load_gpr(ctx, ra);
+            tcg_gen_trunc_i64_i32(t32, va);
+            gen_helper_memory_to_s(vc, t32);
+            tcg_temp_free_i32(t32);
+            break;
+        case 0x0A:
+            /* SQRTF */
+            REQUIRE_REG_31(ra);
+            vb = load_fpr(ctx, rb);
+            gen_helper_sqrtf(vc, cpu_env, vb);
+            break;
+        case 0x0B:
+            /* SQRTS */
+            REQUIRE_REG_31(ra);
+            gen_sqrts(ctx, rb, rc, fn11);
+            break;
+        case 0x14:
+            /* ITOFF */
+            REQUIRE_REG_31(rb);
+            t32 = tcg_temp_new_i32();
+            va = load_gpr(ctx, ra);
+            tcg_gen_trunc_i64_i32(t32, va);
+            gen_helper_memory_to_f(vc, t32);
+            tcg_temp_free_i32(t32);
+            break;
+        case 0x24:
+            /* ITOFT */
+            REQUIRE_REG_31(rb);
+            va = load_gpr(ctx, ra);
+            tcg_gen_mov_i64(vc, va);
+            break;
+        case 0x2A:
+            /* SQRTG */
+            REQUIRE_REG_31(ra);
+            vb = load_fpr(ctx, rb);
+            gen_helper_sqrtg(vc, cpu_env, vb);
+            break;
+        case 0x2B:
+            /* SQRTT */
+            REQUIRE_REG_31(ra);
+            gen_sqrtt(ctx, rb, rc, fn11);
+            break;
+        default:
+            goto invalid_opc;
+        }
+        break;
+
+    case 0x15:
+        /* VAX floating point */
+        /* XXX: rounding mode and trap are ignored (!) */
+        vc = dest_fpr(ctx, rc);
+        vb = load_fpr(ctx, rb);
+        va = load_fpr(ctx, ra);
+        switch (fpfn) { /* fn11 & 0x3F */
+        case 0x00:
+            /* ADDF */
+            gen_helper_addf(vc, cpu_env, va, vb);
+            break;
+        case 0x01:
+            /* SUBF */
+            gen_helper_subf(vc, cpu_env, va, vb);
+            break;
+        case 0x02:
+            /* MULF */
+            gen_helper_mulf(vc, cpu_env, va, vb);
+            break;
+        case 0x03:
+            /* DIVF */
+            gen_helper_divf(vc, cpu_env, va, vb);
+            break;
+        case 0x1E:
+            /* CVTDG -- TODO */
+            REQUIRE_REG_31(ra);
+            goto invalid_opc;
+        case 0x20:
+            /* ADDG */
+            gen_helper_addg(vc, cpu_env, va, vb);
+            break;
+        case 0x21:
+            /* SUBG */
+            gen_helper_subg(vc, cpu_env, va, vb);
+            break;
+        case 0x22:
+            /* MULG */
+            gen_helper_mulg(vc, cpu_env, va, vb);
+            break;
+        case 0x23:
+            /* DIVG */
+            gen_helper_divg(vc, cpu_env, va, vb);
+            break;
+        case 0x25:
+            /* CMPGEQ */
+            gen_helper_cmpgeq(vc, cpu_env, va, vb);
+            break;
+        case 0x26:
+            /* CMPGLT */
+            gen_helper_cmpglt(vc, cpu_env, va, vb);
+            break;
+        case 0x27:
+            /* CMPGLE */
+            gen_helper_cmpgle(vc, cpu_env, va, vb);
+            break;
+        case 0x2C:
+            /* CVTGF */
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtgf(vc, cpu_env, vb);
+            break;
+        case 0x2D:
+            /* CVTGD -- TODO */
+            REQUIRE_REG_31(ra);
+            goto invalid_opc;
+        case 0x2F:
+            /* CVTGQ */
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtgq(vc, cpu_env, vb);
+            break;
+        case 0x3C:
+            /* CVTQF */
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtqf(vc, cpu_env, vb);
+            break;
+        case 0x3E:
+            /* CVTQG */
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtqg(vc, cpu_env, vb);
+            break;
+        default:
+            goto invalid_opc;
+        }
+        break;
+
+    case 0x16:
+        /* IEEE floating-point */
+        switch (fpfn) { /* fn11 & 0x3F */
+        case 0x00:
+            /* ADDS */
+            gen_adds(ctx, ra, rb, rc, fn11);
+            break;
+        case 0x01:
+            /* SUBS */
+            gen_subs(ctx, ra, rb, rc, fn11);
+            break;
+        case 0x02:
+            /* MULS */
+            gen_muls(ctx, ra, rb, rc, fn11);
+            break;
+        case 0x03:
+            /* DIVS */
+            gen_divs(ctx, ra, rb, rc, fn11);
+            break;
+        case 0x20:
+            /* ADDT */
+            gen_addt(ctx, ra, rb, rc, fn11);
+            break;
+        case 0x21:
+            /* SUBT */
+            gen_subt(ctx, ra, rb, rc, fn11);
+            break;
+        case 0x22:
+            /* MULT */
+            gen_mult(ctx, ra, rb, rc,
fn11); + break; + case 0x23: + /* DIVT */ + gen_divt(ctx, ra, rb, rc, fn11); + break; + case 0x24: + /* CMPTUN */ + gen_cmptun(ctx, ra, rb, rc, fn11); + break; + case 0x25: + /* CMPTEQ */ + gen_cmpteq(ctx, ra, rb, rc, fn11); + break; + case 0x26: + /* CMPTLT */ + gen_cmptlt(ctx, ra, rb, rc, fn11); + break; + case 0x27: + /* CMPTLE */ + gen_cmptle(ctx, ra, rb, rc, fn11); + break; + case 0x2C: + REQUIRE_REG_31(ra); + if (fn11 == 0x2AC || fn11 == 0x6AC) { + /* CVTST */ + gen_cvtst(ctx, rb, rc, fn11); + } else { + /* CVTTS */ + gen_cvtts(ctx, rb, rc, fn11); + } + break; + case 0x2F: + /* CVTTQ */ + REQUIRE_REG_31(ra); + gen_cvttq(ctx, rb, rc, fn11); + break; + case 0x3C: + /* CVTQS */ + REQUIRE_REG_31(ra); + gen_cvtqs(ctx, rb, rc, fn11); + break; + case 0x3E: + /* CVTQT */ + REQUIRE_REG_31(ra); + gen_cvtqt(ctx, rb, rc, fn11); + break; + default: + goto invalid_opc; + } + break; + + case 0x17: + switch (fn11) { + case 0x010: + /* CVTLQ */ + REQUIRE_REG_31(ra); + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + gen_cvtlq(vc, vb); + break; + case 0x020: + /* CPYS */ + if (rc == 31) { + /* Special case CPYS as FNOP. */ + } else { + vc = dest_fpr(ctx, rc); + va = load_fpr(ctx, ra); + if (ra == rb) { + /* Special case CPYS as FMOV. */ + tcg_gen_mov_i64(vc, va); + } else { + vb = load_fpr(ctx, rb); + gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL); + } + } + break; + case 0x021: + /* CPYSN */ + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + va = load_fpr(ctx, ra); + gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL); + break; + case 0x022: + /* CPYSE */ + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + va = load_fpr(ctx, ra); + gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL); + break; + case 0x024: + /* MT_FPCR */ + va = load_fpr(ctx, ra); + gen_helper_store_fpcr(cpu_env, va); + if (ctx->tb_rm == QUAL_RM_D) { + /* Re-do the copy of the rounding mode to fp_status + the next time we use dynamic rounding. */ + ctx->tb_rm = -1; + } + break; + case 0x025: + /* MF_FPCR */ + va = dest_fpr(ctx, ra); + gen_helper_load_fpcr(va, cpu_env); + break; + case 0x02A: + /* FCMOVEQ */ + gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc); + break; + case 0x02B: + /* FCMOVNE */ + gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc); + break; + case 0x02C: + /* FCMOVLT */ + gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc); + break; + case 0x02D: + /* FCMOVGE */ + gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc); + break; + case 0x02E: + /* FCMOVLE */ + gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc); + break; + case 0x02F: + /* FCMOVGT */ + gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc); + break; + case 0x030: /* CVTQL */ + case 0x130: /* CVTQL/V */ + case 0x530: /* CVTQL/SV */ + REQUIRE_REG_31(ra); + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + gen_helper_cvtql(vc, cpu_env, vb); + gen_fp_exc_raise(rc, fn11); + break; + default: + goto invalid_opc; + } + break; + + case 0x18: + switch ((uint16_t)disp16) { + case 0x0000: + /* TRAPB */ + /* No-op. */ + break; + case 0x0400: + /* EXCB */ + /* No-op. 
*/
+            break;
+        case 0x4000:
+            /* MB */
+            /* No-op */
+            break;
+        case 0x4400:
+            /* WMB */
+            /* No-op */
+            break;
+        case 0x8000:
+            /* FETCH */
+            /* No-op */
+            break;
+        case 0xA000:
+            /* FETCH_M */
+            /* No-op */
+            break;
+        case 0xC000:
+            /* RPCC */
+            va = dest_gpr(ctx, ra);
+            if (ctx->tb->cflags & CF_USE_ICOUNT) {
+                gen_io_start();
+                gen_helper_load_pcc(va, cpu_env);
+                gen_io_end();
+                ret = EXIT_PC_STALE;
+            } else {
+                gen_helper_load_pcc(va, cpu_env);
+            }
+            break;
+        case 0xE000:
+            /* RC */
+            gen_rx(ra, 0);
+            break;
+        case 0xE800:
+            /* ECB */
+            break;
+        case 0xF000:
+            /* RS */
+            gen_rx(ra, 1);
+            break;
+        case 0xF800:
+            /* WH64 */
+            /* No-op */
+            break;
+        case 0xFC00:
+            /* WH64EN */
+            /* No-op */
+            break;
+        default:
+            goto invalid_opc;
+        }
+        break;
+
+    case 0x19:
+        /* HW_MFPR (PALcode) */
+#ifndef CONFIG_USER_ONLY
+        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+        va = dest_gpr(ctx, ra);
+        ret = gen_mfpr(ctx, va, insn & 0xffff);
+        break;
+#else
+        goto invalid_opc;
+#endif
+
+    case 0x1A:
+        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
+           prediction stack action, which of course we don't implement.  */
+        vb = load_gpr(ctx, rb);
+        tcg_gen_andi_i64(cpu_pc, vb, ~3);
+        if (ra != 31) {
+            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
+        }
+        ret = EXIT_PC_UPDATED;
+        break;
+
+    case 0x1B:
+        /* HW_LD (PALcode) */
+#ifndef CONFIG_USER_ONLY
+        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+        {
+            TCGv addr = tcg_temp_new();
+            vb = load_gpr(ctx, rb);
+            va = dest_gpr(ctx, ra);
+
+            tcg_gen_addi_i64(addr, vb, disp12);
+            switch ((insn >> 12) & 0xF) {
+            case 0x0:
+                /* Longword physical access (hw_ldl/p) */
+                gen_helper_ldl_phys(va, cpu_env, addr);
+                break;
+            case 0x1:
+                /* Quadword physical access (hw_ldq/p) */
+                gen_helper_ldq_phys(va, cpu_env, addr);
+                break;
+            case 0x2:
+                /* Longword physical access with lock (hw_ldl_l/p) */
+                gen_helper_ldl_l_phys(va, cpu_env, addr);
+                break;
+            case 0x3:
+                /* Quadword physical access with lock (hw_ldq_l/p) */
+                gen_helper_ldq_l_phys(va, cpu_env, addr);
+                break;
+            case 0x4:
+                /* Longword virtual PTE fetch (hw_ldl/v) */
+                goto invalid_opc;
+            case 0x5:
+                /* Quadword virtual PTE fetch (hw_ldq/v) */
+                goto invalid_opc;
+            case 0x6:
+                /* Invalid */
+                goto invalid_opc;
+            case 0x7:
+                /* Invalid */
+                goto invalid_opc;
+            case 0x8:
+                /* Longword virtual access (hw_ldl) */
+                goto invalid_opc;
+            case 0x9:
+                /* Quadword virtual access (hw_ldq) */
+                goto invalid_opc;
+            case 0xA:
+                /* Longword virtual access with protection check (hw_ldl/w) */
+                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+                break;
+            case 0xB:
+                /* Quadword virtual access with protection check (hw_ldq/w) */
+                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
+                break;
+            case 0xC:
+                /* Longword virtual access with alt access mode (hw_ldl/a) */
+                goto invalid_opc;
+            case 0xD:
+                /* Quadword virtual access with alt access mode (hw_ldq/a) */
+                goto invalid_opc;
+            case 0xE:
+                /* Longword virtual access with alternate access mode and
+                   protection checks (hw_ldl/wa) */
+                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+                break;
+            case 0xF:
+                /* Quadword virtual access with alternate access mode and
+                   protection checks (hw_ldq/wa) */
+                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
+                break;
+            }
+            tcg_temp_free(addr);
+            break;
+        }
+#else
+        goto invalid_opc;
+#endif
+
+    case 0x1C:
+        vc = dest_gpr(ctx, rc);
+        if (fn7 == 0x70) {
+            /* FTOIT */
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+            REQUIRE_REG_31(rb);
+            va = load_fpr(ctx, ra);
+            tcg_gen_mov_i64(vc, va);
+            break;
+        } else if (fn7 == 0x78) {
+            /* FTOIS */
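+            /* FTOIS is the inverse of ITOFS: s_to_memory recovers the
+               32-bit memory image of the S-format value held in the FP
+               register, and it is then sign-extended into the integer
+               destination.  */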
REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX); + REQUIRE_REG_31(rb); + t32 = tcg_temp_new_i32(); + va = load_fpr(ctx, ra); + gen_helper_s_to_memory(t32, va); + tcg_gen_ext_i32_i64(vc, t32); + tcg_temp_free_i32(t32); + break; + } + + vb = load_gpr_lit(ctx, rb, lit, islit); + switch (fn7) { + case 0x00: + /* SEXTB */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_REG_31(ra); + tcg_gen_ext8s_i64(vc, vb); + break; + case 0x01: + /* SEXTW */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_REG_31(ra); + tcg_gen_ext16s_i64(vc, vb); + break; + case 0x30: + /* CTPOP */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_REG_31(ra); + REQUIRE_NO_LIT; + gen_helper_ctpop(vc, vb); + break; + case 0x31: + /* PERR */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_NO_LIT; + va = load_gpr(ctx, ra); + gen_helper_perr(vc, va, vb); + break; + case 0x32: + /* CTLZ */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_REG_31(ra); + REQUIRE_NO_LIT; + gen_helper_ctlz(vc, vb); + break; + case 0x33: + /* CTTZ */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_REG_31(ra); + REQUIRE_NO_LIT; + gen_helper_cttz(vc, vb); + break; + case 0x34: + /* UNPKBW */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + REQUIRE_NO_LIT; + gen_helper_unpkbw(vc, vb); + break; + case 0x35: + /* UNPKBL */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + REQUIRE_NO_LIT; + gen_helper_unpkbl(vc, vb); + break; + case 0x36: + /* PKWB */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + REQUIRE_NO_LIT; + gen_helper_pkwb(vc, vb); + break; + case 0x37: + /* PKLB */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + REQUIRE_NO_LIT; + gen_helper_pklb(vc, vb); + break; + case 0x38: + /* MINSB8 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minsb8(vc, va, vb); + break; + case 0x39: + /* MINSW4 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minsw4(vc, va, vb); + break; + case 0x3A: + /* MINUB8 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minub8(vc, va, vb); + break; + case 0x3B: + /* MINUW4 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minuw4(vc, va, vb); + break; + case 0x3C: + /* MAXUB8 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxub8(vc, va, vb); + break; + case 0x3D: + /* MAXUW4 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxuw4(vc, va, vb); + break; + case 0x3E: + /* MAXSB8 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxsb8(vc, va, vb); + break; + case 0x3F: + /* MAXSW4 */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxsw4(vc, va, vb); + break; + default: + goto invalid_opc; + } + break; + + case 0x1D: + /* HW_MTPR (PALcode) */ +#ifndef CONFIG_USER_ONLY + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + vb = load_gpr(ctx, rb); + ret = gen_mtpr(ctx, vb, insn & 0xffff); + break; +#else + goto invalid_opc; +#endif + + case 0x1E: + /* HW_RET (PALcode) */ +#ifndef CONFIG_USER_ONLY + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + if (rb == 31) { + /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return + address from EXC_ADDR. This turns out to be useful for our + emulation PALcode, so continue to accept it. 
*/ + tmp = tcg_temp_new(); + tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); + gen_helper_hw_ret(cpu_env, tmp); + tcg_temp_free(tmp); + } else { + gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb)); + } + ret = EXIT_PC_UPDATED; + break; +#else + goto invalid_opc; +#endif + + case 0x1F: + /* HW_ST (PALcode) */ +#ifndef CONFIG_USER_ONLY + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + { + TCGv addr = tcg_temp_new(); + va = load_gpr(ctx, ra); + vb = load_gpr(ctx, rb); + + tcg_gen_addi_i64(addr, vb, disp12); + switch ((insn >> 12) & 0xF) { + case 0x0: + /* Longword physical access */ + gen_helper_stl_phys(cpu_env, addr, va); + break; + case 0x1: + /* Quadword physical access */ + gen_helper_stq_phys(cpu_env, addr, va); + break; + case 0x2: + /* Longword physical access with lock */ + gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va); + break; + case 0x3: + /* Quadword physical access with lock */ + gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va); + break; + case 0x4: + /* Longword virtual access */ + goto invalid_opc; + case 0x5: + /* Quadword virtual access */ + goto invalid_opc; + case 0x6: + /* Invalid */ + goto invalid_opc; + case 0x7: + /* Invalid */ + goto invalid_opc; + case 0x8: + /* Invalid */ + goto invalid_opc; + case 0x9: + /* Invalid */ + goto invalid_opc; + case 0xA: + /* Invalid */ + goto invalid_opc; + case 0xB: + /* Invalid */ + goto invalid_opc; + case 0xC: + /* Longword virtual access with alternate access mode */ + goto invalid_opc; + case 0xD: + /* Quadword virtual access with alternate access mode */ + goto invalid_opc; + case 0xE: + /* Invalid */ + goto invalid_opc; + case 0xF: + /* Invalid */ + goto invalid_opc; + } + tcg_temp_free(addr); + break; + } +#else + goto invalid_opc; +#endif + case 0x20: + /* LDF */ + gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0); + break; + case 0x21: + /* LDG */ + gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0); + break; + case 0x22: + /* LDS */ + gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0); + break; + case 0x23: + /* LDT */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0); + break; + case 0x24: + /* STF */ + gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0); + break; + case 0x25: + /* STG */ + gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0); + break; + case 0x26: + /* STS */ + gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0); + break; + case 0x27: + /* STT */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0); + break; + case 0x28: + /* LDL */ + gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0); + break; + case 0x29: + /* LDQ */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0); + break; + case 0x2A: + /* LDL_L */ + gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0); + break; + case 0x2B: + /* LDQ_L */ + gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0); + break; + case 0x2C: + /* STL */ + gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0); + break; + case 0x2D: + /* STQ */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0); + break; + case 0x2E: + /* STL_C */ + ret = gen_store_conditional(ctx, ra, rb, disp16, 0); + break; + case 0x2F: + /* STQ_C */ + ret = gen_store_conditional(ctx, ra, rb, disp16, 1); + break; + case 0x30: + /* BR */ + ret = gen_bdirect(ctx, ra, disp21); + break; + case 0x31: /* FBEQ */ + ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); + break; + case 0x32: /* FBLT */ + ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); + break; + case 0x33: /* FBLE */ + ret = 
gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
+        break;
+    case 0x34:
+        /* BSR */
+        ret = gen_bdirect(ctx, ra, disp21);
+        break;
+    case 0x35: /* FBNE */
+        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
+        break;
+    case 0x36: /* FBGE */
+        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
+        break;
+    case 0x37: /* FBGT */
+        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
+        break;
+    case 0x38:
+        /* BLBC */
+        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
+        break;
+    case 0x39:
+        /* BEQ */
+        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
+        break;
+    case 0x3A:
+        /* BLT */
+        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
+        break;
+    case 0x3B:
+        /* BLE */
+        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
+        break;
+    case 0x3C:
+        /* BLBS */
+        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
+        break;
+    case 0x3D:
+        /* BNE */
+        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
+        break;
+    case 0x3E:
+        /* BGE */
+        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
+        break;
+    case 0x3F:
+        /* BGT */
+        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
+        break;
+    invalid_opc:
+        ret = gen_invalid(ctx);
+        break;
+    }
+
+    return ret;
+}
+
+static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
+                                                  TranslationBlock *tb,
+                                                  bool search_pc)
+{
+    CPUState *cs = CPU(cpu);
+    CPUAlphaState *env = &cpu->env;
+    DisasContext ctx, *ctxp = &ctx;
+    target_ulong pc_start;
+    target_ulong pc_mask;
+    uint32_t insn;
+    CPUBreakpoint *bp;
+    int j, lj = -1;
+    ExitStatus ret;
+    int num_insns;
+    int max_insns;
+
+    pc_start = tb->pc;
+
+    ctx.tb = tb;
+    ctx.pc = pc_start;
+    ctx.mem_idx = cpu_mmu_index(env);
+    ctx.implver = env->implver;
+    ctx.singlestep_enabled = cs->singlestep_enabled;
+
+    /* ??? Every TB begins with unset rounding mode, to be initialized on
+       the first fp insn of the TB.  Alternatively we could define a proper
+       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
+       to reset the FP_STATUS to that default at the end of any TB that
+       changes the default.  We could even (gasp) dynamically figure out
+       what default would be most efficient given the running program.  */
+    ctx.tb_rm = -1;
+    /* Similarly for flush-to-zero.
*/ + ctx.tb_ftz = -1; + + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = CF_COUNT_MASK; + } + + if (in_superpage(&ctx, pc_start)) { + pc_mask = (1ULL << 41) - 1; + } else { + pc_mask = ~TARGET_PAGE_MASK; + } + + gen_tb_start(tb); + do { + if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + if (bp->pc == ctx.pc) { + gen_excp(&ctx, EXCP_DEBUG, 0); + break; + } + } + } + if (search_pc) { + j = tcg_op_buf_count(); + if (lj < j) { + lj++; + while (lj < j) { + tcg_ctx.gen_opc_instr_start[lj++] = 0; + } + } + tcg_ctx.gen_opc_pc[lj] = ctx.pc; + tcg_ctx.gen_opc_instr_start[lj] = 1; + tcg_ctx.gen_opc_icount[lj] = num_insns; + } + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { + gen_io_start(); + } + insn = cpu_ldl_code(env, ctx.pc); + num_insns++; + + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { + tcg_gen_debug_insn_start(ctx.pc); + } + + TCGV_UNUSED_I64(ctx.zero); + TCGV_UNUSED_I64(ctx.sink); + TCGV_UNUSED_I64(ctx.lit); + + ctx.pc += 4; + ret = translate_one(ctxp, insn); + + if (!TCGV_IS_UNUSED_I64(ctx.sink)) { + tcg_gen_discard_i64(ctx.sink); + tcg_temp_free(ctx.sink); + } + if (!TCGV_IS_UNUSED_I64(ctx.zero)) { + tcg_temp_free(ctx.zero); + } + if (!TCGV_IS_UNUSED_I64(ctx.lit)) { + tcg_temp_free(ctx.lit); + } + + /* If we reach a page boundary, are single stepping, + or exhaust instruction count, stop generation. */ + if (ret == NO_EXIT + && ((ctx.pc & pc_mask) == 0 + || tcg_op_buf_full() + || num_insns >= max_insns + || singlestep + || ctx.singlestep_enabled)) { + ret = EXIT_PC_STALE; + } + } while (ret == NO_EXIT); + + if (tb->cflags & CF_LAST_IO) { + gen_io_end(); + } + + switch (ret) { + case EXIT_GOTO_TB: + case EXIT_NORETURN: + break; + case EXIT_PC_STALE: + tcg_gen_movi_i64(cpu_pc, ctx.pc); + /* FALLTHRU */ + case EXIT_PC_UPDATED: + if (ctx.singlestep_enabled) { + gen_excp_1(EXCP_DEBUG, 0); + } else { + tcg_gen_exit_tb(0); + } + break; + default: + abort(); + } + + gen_tb_end(tb, num_insns); + + if (search_pc) { + j = tcg_op_buf_count(); + lj++; + while (lj <= j) { + tcg_ctx.gen_opc_instr_start[lj++] = 0; + } + } else { + tb->size = ctx.pc - pc_start; + tb->icount = num_insns; + } + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("IN: %s\n", lookup_symbol(pc_start)); + log_target_disas(cs, pc_start, ctx.pc - pc_start, 1); + qemu_log("\n"); + } +#endif +} + +void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false); +} + +void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true); +} + +void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos) +{ + env->pc = tcg_ctx.gen_opc_pc[pc_pos]; +} diff --git a/qemu/target-alpha/vax_helper.c b/qemu/target-alpha/vax_helper.c new file mode 100644 index 000000000..2e2f49971 --- /dev/null +++ b/qemu/target-alpha/vax_helper.c @@ -0,0 +1,353 @@ +/* + * Helpers for vax floating point instructions. + * + * Copyright (c) 2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" + +#define FP_STATUS (env->fp_status) + + +/* F floating (VAX) */ +static uint64_t float32_to_f(float32 fa) +{ + uint64_t r, exp, mant, sig; + CPU_FloatU a; + + a.f = fa; + sig = ((uint64_t)a.l & 0x80000000) << 32; + exp = (a.l >> 23) & 0xff; + mant = ((uint64_t)a.l & 0x007fffff) << 29; + + if (exp == 255) { + /* NaN or infinity */ + r = 1; /* VAX dirty zero */ + } else if (exp == 0) { + if (mant == 0) { + /* Zero */ + r = 0; + } else { + /* Denormalized */ + r = sig | ((exp + 1) << 52) | mant; + } + } else { + if (exp >= 253) { + /* Overflow */ + r = 1; /* VAX dirty zero */ + } else { + r = sig | ((exp + 2) << 52); + } + } + + return r; +} + +static float32 f_to_float32(CPUAlphaState *env, uintptr_t retaddr, uint64_t a) +{ + uint32_t exp, mant_sig; + CPU_FloatU r; + + exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f); + mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff); + + if (unlikely(!exp && mant_sig)) { + /* Reserved operands / Dirty zero */ + dynamic_excp(env, retaddr, EXCP_OPCDEC, 0); + } + + if (exp < 3) { + /* Underflow */ + r.l = 0; + } else { + r.l = ((exp - 2) << 23) | mant_sig; + } + + return r.f; +} + +uint32_t helper_f_to_memory(uint64_t a) +{ + uint32_t r; + r = (a & 0x00001fffe0000000ull) >> 13; + r |= (a & 0x07ffe00000000000ull) >> 45; + r |= (a & 0xc000000000000000ull) >> 48; + return r; +} + +uint64_t helper_memory_to_f(uint32_t a) +{ + uint64_t r; + r = ((uint64_t)(a & 0x0000c000)) << 48; + r |= ((uint64_t)(a & 0x003fffff)) << 45; + r |= ((uint64_t)(a & 0xffff0000)) << 13; + if (!(a & 0x00004000)) { + r |= 0x7ll << 59; + } + return r; +} + +/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should + either implement VAX arithmetic properly or just signal invalid opcode. 
*/ + +uint64_t helper_addf(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = f_to_float32(env, GETPC(), a); + fb = f_to_float32(env, GETPC(), b); + fr = float32_add(fa, fb, &FP_STATUS); + return float32_to_f(fr); +} + +uint64_t helper_subf(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = f_to_float32(env, GETPC(), a); + fb = f_to_float32(env, GETPC(), b); + fr = float32_sub(fa, fb, &FP_STATUS); + return float32_to_f(fr); +} + +uint64_t helper_mulf(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = f_to_float32(env, GETPC(), a); + fb = f_to_float32(env, GETPC(), b); + fr = float32_mul(fa, fb, &FP_STATUS); + return float32_to_f(fr); +} + +uint64_t helper_divf(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = f_to_float32(env, GETPC(), a); + fb = f_to_float32(env, GETPC(), b); + fr = float32_div(fa, fb, &FP_STATUS); + return float32_to_f(fr); +} + +uint64_t helper_sqrtf(CPUAlphaState *env, uint64_t t) +{ + float32 ft, fr; + + ft = f_to_float32(env, GETPC(), t); + fr = float32_sqrt(ft, &FP_STATUS); + return float32_to_f(fr); +} + + +/* G floating (VAX) */ +static uint64_t float64_to_g(float64 fa) +{ + uint64_t r, exp, mant, sig; + CPU_DoubleU a; + + a.d = fa; + sig = a.ll & 0x8000000000000000ull; + exp = (a.ll >> 52) & 0x7ff; + mant = a.ll & 0x000fffffffffffffull; + + if (exp == 2047) { + /* NaN or infinity */ + r = 1; /* VAX dirty zero */ + } else if (exp == 0) { + if (mant == 0) { + /* Zero */ + r = 0; + } else { + /* Denormalized */ + r = sig | ((exp + 1) << 52) | mant; + } + } else { + if (exp >= 2045) { + /* Overflow */ + r = 1; /* VAX dirty zero */ + } else { + r = sig | ((exp + 2) << 52); + } + } + + return r; +} + +static float64 g_to_float64(CPUAlphaState *env, uintptr_t retaddr, uint64_t a) +{ + uint64_t exp, mant_sig; + CPU_DoubleU r; + + exp = (a >> 52) & 0x7ff; + mant_sig = a & 0x800fffffffffffffull; + + if (!exp && mant_sig) { + /* Reserved operands / Dirty zero */ + dynamic_excp(env, retaddr, EXCP_OPCDEC, 0); + } + + if (exp < 3) { + /* Underflow */ + r.ll = 0; + } else { + r.ll = ((exp - 2) << 52) | mant_sig; + } + + return r.d; +} + +uint64_t helper_g_to_memory(uint64_t a) +{ + uint64_t r; + r = (a & 0x000000000000ffffull) << 48; + r |= (a & 0x00000000ffff0000ull) << 16; + r |= (a & 0x0000ffff00000000ull) >> 16; + r |= (a & 0xffff000000000000ull) >> 48; + return r; +} + +uint64_t helper_memory_to_g(uint64_t a) +{ + uint64_t r; + r = (a & 0x000000000000ffffull) << 48; + r |= (a & 0x00000000ffff0000ull) << 16; + r |= (a & 0x0000ffff00000000ull) >> 16; + r |= (a & 0xffff000000000000ull) >> 48; + return r; +} + +uint64_t helper_addg(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = g_to_float64(env, GETPC(), a); + fb = g_to_float64(env, GETPC(), b); + fr = float64_add(fa, fb, &FP_STATUS); + return float64_to_g(fr); +} + +uint64_t helper_subg(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = g_to_float64(env, GETPC(), a); + fb = g_to_float64(env, GETPC(), b); + fr = float64_sub(fa, fb, &FP_STATUS); + return float64_to_g(fr); +} + +uint64_t helper_mulg(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = g_to_float64(env, GETPC(), a); + fb = g_to_float64(env, GETPC(), b); + fr = float64_mul(fa, fb, &FP_STATUS); + return float64_to_g(fr); +} + +uint64_t helper_divg(CPUAlphaState *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = g_to_float64(env, GETPC(), a); + fb = 
g_to_float64(env, GETPC(), b);
+    fr = float64_div(fa, fb, &FP_STATUS);
+    return float64_to_g(fr);
+}
+
+uint64_t helper_sqrtg(CPUAlphaState *env, uint64_t a)
+{
+    float64 fa, fr;
+
+    fa = g_to_float64(env, GETPC(), a);
+    fr = float64_sqrt(fa, &FP_STATUS);
+    return float64_to_g(fr);
+}
+
+uint64_t helper_cmpgeq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+
+    fa = g_to_float64(env, GETPC(), a);
+    fb = g_to_float64(env, GETPC(), b);
+
+    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+        return 0x4000000000000000ULL;
+    } else {
+        return 0;
+    }
+}
+
+uint64_t helper_cmpgle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+
+    fa = g_to_float64(env, GETPC(), a);
+    fb = g_to_float64(env, GETPC(), b);
+
+    if (float64_le(fa, fb, &FP_STATUS)) {
+        return 0x4000000000000000ULL;
+    } else {
+        return 0;
+    }
+}
+
+uint64_t helper_cmpglt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+
+    fa = g_to_float64(env, GETPC(), a);
+    fb = g_to_float64(env, GETPC(), b);
+
+    if (float64_lt(fa, fb, &FP_STATUS)) {
+        return 0x4000000000000000ULL;
+    } else {
+        return 0;
+    }
+}
+
+uint64_t helper_cvtqf(CPUAlphaState *env, uint64_t a)
+{
+    float32 fr = int64_to_float32(a, &FP_STATUS);
+    return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgf(CPUAlphaState *env, uint64_t a)
+{
+    float64 fa;
+    float32 fr;
+
+    fa = g_to_float64(env, GETPC(), a);
+    fr = float64_to_float32(fa, &FP_STATUS);
+    return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgq(CPUAlphaState *env, uint64_t a)
+{
+    float64 fa = g_to_float64(env, GETPC(), a);
+    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
+}
+
+uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
+{
+    float64 fr;
+    fr = int64_to_float64(a, &FP_STATUS);
+    return float64_to_g(fr);
+}