Diffstat (limited to 'qemu/linux-user/main.c')
-rw-r--r-- | qemu/linux-user/main.c | 4411
1 files changed, 4411 insertions, 0 deletions
diff --git a/qemu/linux-user/main.c b/qemu/linux-user/main.c new file mode 100644 index 000000000..fdee98135 --- /dev/null +++ b/qemu/linux-user/main.c @@ -0,0 +1,4411 @@ +/* + * qemu user main + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#include <stdlib.h> +#include <stdio.h> +#include <stdarg.h> +#include <string.h> +#include <errno.h> +#include <unistd.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <sys/resource.h> + +#include "qemu.h" +#include "qemu-common.h" +#include "cpu.h" +#include "tcg.h" +#include "qemu/timer.h" +#include "qemu/envlist.h" +#include "elf.h" + +char *exec_path; + +int singlestep; +const char *filename; +const char *argv0; +int gdbstub_port; +envlist_t *envlist; +static const char *cpu_model; +unsigned long mmap_min_addr; +#if defined(CONFIG_USE_GUEST_BASE) +unsigned long guest_base; +int have_guest_base; +#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64) +/* + * When running 32-on-64 we should make sure we can fit all of the possible + * guest address space into a contiguous chunk of virtual host memory. + * + * This way we will never overlap with our own libraries or binaries or stack + * or anything else that QEMU maps. + */ +# ifdef TARGET_MIPS +/* MIPS only supports 31 bits of virtual address space for user space */ +unsigned long reserved_va = 0x77000000; +# else +unsigned long reserved_va = 0xf7000000; +# endif +#else +unsigned long reserved_va; +#endif +#endif + +static void usage(void); + +static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX; +const char *qemu_uname_release; + +/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so + we allocate a bigger stack. Need a better solution, for example + by remapping the process stack directly at the right place */ +unsigned long guest_stack_size = 8 * 1024 * 1024UL; + +void gemu_log(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); +} + +#if defined(TARGET_I386) +int cpu_get_pic_interrupt(CPUX86State *env) +{ + return -1; +} +#endif + +/***********************************************************/ +/* Helper routines for implementing atomic operations. */ + +/* To implement exclusive operations we force all cpus to syncronise. + We don't require a full sync, only that no cpus are executing guest code. + The alternative is to map target atomic ops onto host equivalents, + which requires quite a lot of per host/target work. */ +static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER; +static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER; +static int pending_cpus; + +/* Make sure everything is in a consistent state for calling fork(). 
*/ +void fork_start(void) +{ + pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock); + pthread_mutex_lock(&exclusive_lock); + mmap_fork_start(); +} + +void fork_end(int child) +{ + mmap_fork_end(child); + if (child) { + CPUState *cpu, *next_cpu; + /* Child processes created by fork() only have a single thread. + Discard information about the parent threads. */ + CPU_FOREACH_SAFE(cpu, next_cpu) { + if (cpu != thread_cpu) { + QTAILQ_REMOVE(&cpus, thread_cpu, node); + } + } + pending_cpus = 0; + pthread_mutex_init(&exclusive_lock, NULL); + pthread_mutex_init(&cpu_list_mutex, NULL); + pthread_cond_init(&exclusive_cond, NULL); + pthread_cond_init(&exclusive_resume, NULL); + pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL); + gdbserver_fork(thread_cpu); + } else { + pthread_mutex_unlock(&exclusive_lock); + pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); + } +} + +/* Wait for pending exclusive operations to complete. The exclusive lock + must be held. */ +static inline void exclusive_idle(void) +{ + while (pending_cpus) { + pthread_cond_wait(&exclusive_resume, &exclusive_lock); + } +} + +/* Start an exclusive operation. + Must only be called from outside cpu_arm_exec. */ +static inline void start_exclusive(void) +{ + CPUState *other_cpu; + + pthread_mutex_lock(&exclusive_lock); + exclusive_idle(); + + pending_cpus = 1; + /* Make all other cpus stop executing. */ + CPU_FOREACH(other_cpu) { + if (other_cpu->running) { + pending_cpus++; + cpu_exit(other_cpu); + } + } + if (pending_cpus > 1) { + pthread_cond_wait(&exclusive_cond, &exclusive_lock); + } +} + +/* Finish an exclusive operation. */ +static inline void __attribute__((unused)) end_exclusive(void) +{ + pending_cpus = 0; + pthread_cond_broadcast(&exclusive_resume); + pthread_mutex_unlock(&exclusive_lock); +} + +/* Wait for exclusive ops to finish, and begin cpu execution. */ +static inline void cpu_exec_start(CPUState *cpu) +{ + pthread_mutex_lock(&exclusive_lock); + exclusive_idle(); + cpu->running = true; + pthread_mutex_unlock(&exclusive_lock); +} + +/* Mark cpu as not executing, and release pending exclusive ops. 
*/ +static inline void cpu_exec_end(CPUState *cpu) +{ + pthread_mutex_lock(&exclusive_lock); + cpu->running = false; + if (pending_cpus > 1) { + pending_cpus--; + if (pending_cpus == 1) { + pthread_cond_signal(&exclusive_cond); + } + } + exclusive_idle(); + pthread_mutex_unlock(&exclusive_lock); +} + +void cpu_list_lock(void) +{ + pthread_mutex_lock(&cpu_list_mutex); +} + +void cpu_list_unlock(void) +{ + pthread_mutex_unlock(&cpu_list_mutex); +} + + +#ifdef TARGET_I386 +/***********************************************************/ +/* CPUX86 core interface */ + +uint64_t cpu_get_tsc(CPUX86State *env) +{ + return cpu_get_real_ticks(); +} + +static void write_dt(void *ptr, unsigned long addr, unsigned long limit, + int flags) +{ + unsigned int e1, e2; + uint32_t *p; + e1 = (addr << 16) | (limit & 0xffff); + e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000); + e2 |= flags; + p = ptr; + p[0] = tswap32(e1); + p[1] = tswap32(e2); +} + +static uint64_t *idt_table; +#ifdef TARGET_X86_64 +static void set_gate64(void *ptr, unsigned int type, unsigned int dpl, + uint64_t addr, unsigned int sel) +{ + uint32_t *p, e1, e2; + e1 = (addr & 0xffff) | (sel << 16); + e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8); + p = ptr; + p[0] = tswap32(e1); + p[1] = tswap32(e2); + p[2] = tswap32(addr >> 32); + p[3] = 0; +} +/* only dpl matters as we do only user space emulation */ +static void set_idt(int n, unsigned int dpl) +{ + set_gate64(idt_table + n * 2, 0, dpl, 0, 0); +} +#else +static void set_gate(void *ptr, unsigned int type, unsigned int dpl, + uint32_t addr, unsigned int sel) +{ + uint32_t *p, e1, e2; + e1 = (addr & 0xffff) | (sel << 16); + e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8); + p = ptr; + p[0] = tswap32(e1); + p[1] = tswap32(e2); +} + +/* only dpl matters as we do only user space emulation */ +static void set_idt(int n, unsigned int dpl) +{ + set_gate(idt_table + n, 0, dpl, 0, 0); +} +#endif + +void cpu_loop(CPUX86State *env) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + int trapnr; + abi_ulong pc; + target_siginfo_t info; + + for(;;) { + cpu_exec_start(cs); + trapnr = cpu_x86_exec(cs); + cpu_exec_end(cs); + switch(trapnr) { + case 0x80: + /* linux syscall from int $0x80 */ + env->regs[R_EAX] = do_syscall(env, + env->regs[R_EAX], + env->regs[R_EBX], + env->regs[R_ECX], + env->regs[R_EDX], + env->regs[R_ESI], + env->regs[R_EDI], + env->regs[R_EBP], + 0, 0); + break; +#ifndef TARGET_ABI32 + case EXCP_SYSCALL: + /* linux syscall from syscall instruction */ + env->regs[R_EAX] = do_syscall(env, + env->regs[R_EAX], + env->regs[R_EDI], + env->regs[R_ESI], + env->regs[R_EDX], + env->regs[10], + env->regs[8], + env->regs[9], + 0, 0); + break; +#endif + case EXCP0B_NOSEG: + case EXCP0C_STACK: + info.si_signo = TARGET_SIGBUS; + info.si_errno = 0; + info.si_code = TARGET_SI_KERNEL; + info._sifields._sigfault._addr = 0; + queue_signal(env, info.si_signo, &info); + break; + case EXCP0D_GPF: + /* XXX: potential problem if ABI32 */ +#ifndef TARGET_X86_64 + if (env->eflags & VM_MASK) { + handle_vm86_fault(env); + } else +#endif + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SI_KERNEL; + info._sifields._sigfault._addr = 0; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP0E_PAGE: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + if (!(env->error_code & 1)) + info.si_code = TARGET_SEGV_MAPERR; + else + info.si_code = TARGET_SEGV_ACCERR; + info._sifields._sigfault._addr = env->cr[2]; + 
queue_signal(env, info.si_signo, &info); + break; + case EXCP00_DIVZ: +#ifndef TARGET_X86_64 + if (env->eflags & VM_MASK) { + handle_vm86_trap(env, trapnr); + } else +#endif + { + /* division by zero */ + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + info.si_code = TARGET_FPE_INTDIV; + info._sifields._sigfault._addr = env->eip; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP01_DB: + case EXCP03_INT3: +#ifndef TARGET_X86_64 + if (env->eflags & VM_MASK) { + handle_vm86_trap(env, trapnr); + } else +#endif + { + info.si_signo = TARGET_SIGTRAP; + info.si_errno = 0; + if (trapnr == EXCP01_DB) { + info.si_code = TARGET_TRAP_BRKPT; + info._sifields._sigfault._addr = env->eip; + } else { + info.si_code = TARGET_SI_KERNEL; + info._sifields._sigfault._addr = 0; + } + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP04_INTO: + case EXCP05_BOUND: +#ifndef TARGET_X86_64 + if (env->eflags & VM_MASK) { + handle_vm86_trap(env, trapnr); + } else +#endif + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SI_KERNEL; + info._sifields._sigfault._addr = 0; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP06_ILLOP: + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPN; + info._sifields._sigfault._addr = env->eip; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + default: + pc = env->segs[R_CS].base + env->eip; + fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n", + (long)pc, trapnr); + abort(); + } + process_pending_signals(env); + } +} +#endif + +#ifdef TARGET_ARM + +#define get_user_code_u32(x, gaddr, doswap) \ + ({ abi_long __r = get_user_u32((x), (gaddr)); \ + if (!__r && (doswap)) { \ + (x) = bswap32(x); \ + } \ + __r; \ + }) + +#define get_user_code_u16(x, gaddr, doswap) \ + ({ abi_long __r = get_user_u16((x), (gaddr)); \ + if (!__r && (doswap)) { \ + (x) = bswap16(x); \ + } \ + __r; \ + }) + +#ifdef TARGET_ABI32 +/* Commpage handling -- there is no commpage for AArch64 */ + +/* + * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt + * Input: + * r0 = pointer to oldval + * r1 = pointer to newval + * r2 = pointer to target value + * + * Output: + * r0 = 0 if *ptr was changed, non-0 if no exchange happened + * C set if *ptr was changed, clear if no exchange happened + * + * Note segv's in kernel helpers are a bit tricky, we can set the + * data address sensibly but the PC address is just the entry point. + */ +static void arm_kernel_cmpxchg64_helper(CPUARMState *env) +{ + uint64_t oldval, newval, val; + uint32_t addr, cpsr; + target_siginfo_t info; + + /* Based on the 32 bit code in do_kernel_trap */ + + /* XXX: This only works between threads, not between processes. + It's probably possible to implement this with native host + operations. However things like ldrex/strex are much harder so + there's not much point trying. 
*/ + start_exclusive(); + cpsr = cpsr_read(env); + addr = env->regs[2]; + + if (get_user_u64(oldval, env->regs[0])) { + env->exception.vaddress = env->regs[0]; + goto segv; + }; + + if (get_user_u64(newval, env->regs[1])) { + env->exception.vaddress = env->regs[1]; + goto segv; + }; + + if (get_user_u64(val, addr)) { + env->exception.vaddress = addr; + goto segv; + } + + if (val == oldval) { + val = newval; + + if (put_user_u64(val, addr)) { + env->exception.vaddress = addr; + goto segv; + }; + + env->regs[0] = 0; + cpsr |= CPSR_C; + } else { + env->regs[0] = -1; + cpsr &= ~CPSR_C; + } + cpsr_write(env, cpsr, CPSR_C); + end_exclusive(); + return; + +segv: + end_exclusive(); + /* We get the PC of the entry address - which is as good as anything, + on a real kernel what you get depends on which mode it uses. */ + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->exception.vaddress; + queue_signal(env, info.si_signo, &info); +} + +/* Handle a jump to the kernel code page. */ +static int +do_kernel_trap(CPUARMState *env) +{ + uint32_t addr; + uint32_t cpsr; + uint32_t val; + + switch (env->regs[15]) { + case 0xffff0fa0: /* __kernel_memory_barrier */ + /* ??? No-op. Will need to do better for SMP. */ + break; + case 0xffff0fc0: /* __kernel_cmpxchg */ + /* XXX: This only works between threads, not between processes. + It's probably possible to implement this with native host + operations. However things like ldrex/strex are much harder so + there's not much point trying. */ + start_exclusive(); + cpsr = cpsr_read(env); + addr = env->regs[2]; + /* FIXME: This should SEGV if the access fails. */ + if (get_user_u32(val, addr)) + val = ~env->regs[0]; + if (val == env->regs[0]) { + val = env->regs[1]; + /* FIXME: Check for segfaults. */ + put_user_u32(val, addr); + env->regs[0] = 0; + cpsr |= CPSR_C; + } else { + env->regs[0] = -1; + cpsr &= ~CPSR_C; + } + cpsr_write(env, cpsr, CPSR_C); + end_exclusive(); + break; + case 0xffff0fe0: /* __kernel_get_tls */ + env->regs[0] = cpu_get_tls(env); + break; + case 0xffff0f60: /* __kernel_cmpxchg64 */ + arm_kernel_cmpxchg64_helper(env); + break; + + default: + return 1; + } + /* Jump back to the caller. */ + addr = env->regs[14]; + if (addr & 1) { + env->thumb = 1; + addr &= ~1; + } + env->regs[15] = addr; + + return 0; +} + +/* Store exclusive handling for AArch32 */ +static int do_strex(CPUARMState *env) +{ + uint64_t val; + int size; + int rc = 1; + int segv = 0; + uint32_t addr; + start_exclusive(); + if (env->exclusive_addr != env->exclusive_test) { + goto fail; + } + /* We know we're always AArch32 so the address is in uint32_t range + * unless it was the -1 exclusive-monitor-lost value (which won't + * match exclusive_test above). 
+ */ + assert(extract64(env->exclusive_addr, 32, 32) == 0); + addr = env->exclusive_addr; + size = env->exclusive_info & 0xf; + switch (size) { + case 0: + segv = get_user_u8(val, addr); + break; + case 1: + segv = get_user_u16(val, addr); + break; + case 2: + case 3: + segv = get_user_u32(val, addr); + break; + default: + abort(); + } + if (segv) { + env->exception.vaddress = addr; + goto done; + } + if (size == 3) { + uint32_t valhi; + segv = get_user_u32(valhi, addr + 4); + if (segv) { + env->exception.vaddress = addr + 4; + goto done; + } + val = deposit64(val, 32, 32, valhi); + } + if (val != env->exclusive_val) { + goto fail; + } + + val = env->regs[(env->exclusive_info >> 8) & 0xf]; + switch (size) { + case 0: + segv = put_user_u8(val, addr); + break; + case 1: + segv = put_user_u16(val, addr); + break; + case 2: + case 3: + segv = put_user_u32(val, addr); + break; + } + if (segv) { + env->exception.vaddress = addr; + goto done; + } + if (size == 3) { + val = env->regs[(env->exclusive_info >> 12) & 0xf]; + segv = put_user_u32(val, addr + 4); + if (segv) { + env->exception.vaddress = addr + 4; + goto done; + } + } + rc = 0; +fail: + env->regs[15] += 4; + env->regs[(env->exclusive_info >> 4) & 0xf] = rc; +done: + end_exclusive(); + return segv; +} + +void cpu_loop(CPUARMState *env) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + int trapnr; + unsigned int n, insn; + target_siginfo_t info; + uint32_t addr; + + for(;;) { + cpu_exec_start(cs); + trapnr = cpu_arm_exec(cs); + cpu_exec_end(cs); + switch(trapnr) { + case EXCP_UDEF: + { + TaskState *ts = cs->opaque; + uint32_t opcode; + int rc; + + /* we handle the FPU emulation here, as Linux */ + /* we get the opcode */ + /* FIXME - what to do if get_user() fails? */ + get_user_code_u32(opcode, env->regs[15], env->bswap_code); + + rc = EmulateAll(opcode, &ts->fpa, env); + if (rc == 0) { /* illegal instruction */ + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPN; + info._sifields._sigfault._addr = env->regs[15]; + queue_signal(env, info.si_signo, &info); + } else if (rc < 0) { /* FP exception */ + int arm_fpe=0; + + /* translate softfloat flags to FPSR flags */ + if (-rc & float_flag_invalid) + arm_fpe |= BIT_IOC; + if (-rc & float_flag_divbyzero) + arm_fpe |= BIT_DZC; + if (-rc & float_flag_overflow) + arm_fpe |= BIT_OFC; + if (-rc & float_flag_underflow) + arm_fpe |= BIT_UFC; + if (-rc & float_flag_inexact) + arm_fpe |= BIT_IXC; + + FPSR fpsr = ts->fpa.fpsr; + //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe); + + if (fpsr & (arm_fpe << 16)) { /* exception enabled? 
*/ + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + + /* ordered by priority, least first */ + if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES; + if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND; + if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF; + if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV; + if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV; + + info._sifields._sigfault._addr = env->regs[15]; + queue_signal(env, info.si_signo, &info); + } else { + env->regs[15] += 4; + } + + /* accumulate unenabled exceptions */ + if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC)) + fpsr |= BIT_IXC; + if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC)) + fpsr |= BIT_UFC; + if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC)) + fpsr |= BIT_OFC; + if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC)) + fpsr |= BIT_DZC; + if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC)) + fpsr |= BIT_IOC; + ts->fpa.fpsr=fpsr; + } else { /* everything OK */ + /* increment PC */ + env->regs[15] += 4; + } + } + break; + case EXCP_SWI: + case EXCP_BKPT: + { + env->eabi = 1; + /* system call */ + if (trapnr == EXCP_BKPT) { + if (env->thumb) { + /* FIXME - what to do if get_user() fails? */ + get_user_code_u16(insn, env->regs[15], env->bswap_code); + n = insn & 0xff; + env->regs[15] += 2; + } else { + /* FIXME - what to do if get_user() fails? */ + get_user_code_u32(insn, env->regs[15], env->bswap_code); + n = (insn & 0xf) | ((insn >> 4) & 0xff0); + env->regs[15] += 4; + } + } else { + if (env->thumb) { + /* FIXME - what to do if get_user() fails? */ + get_user_code_u16(insn, env->regs[15] - 2, + env->bswap_code); + n = insn & 0xff; + } else { + /* FIXME - what to do if get_user() fails? */ + get_user_code_u32(insn, env->regs[15] - 4, + env->bswap_code); + n = insn & 0xffffff; + } + } + + if (n == ARM_NR_cacheflush) { + /* nop */ + } else if (n == ARM_NR_semihosting + || n == ARM_NR_thumb_semihosting) { + env->regs[0] = do_arm_semihosting (env); + } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) { + /* linux syscall */ + if (env->thumb || n == 0) { + n = env->regs[7]; + } else { + n -= ARM_SYSCALL_BASE; + env->eabi = 0; + } + if ( n > ARM_NR_BASE) { + switch (n) { + case ARM_NR_cacheflush: + /* nop */ + break; + case ARM_NR_set_tls: + cpu_set_tls(env, env->regs[0]); + env->regs[0] = 0; + break; + case ARM_NR_breakpoint: + env->regs[15] -= env->thumb ? 
2 : 4; + goto excp_debug; + default: + gemu_log("qemu: Unsupported ARM syscall: 0x%x\n", + n); + env->regs[0] = -TARGET_ENOSYS; + break; + } + } else { + env->regs[0] = do_syscall(env, + n, + env->regs[0], + env->regs[1], + env->regs[2], + env->regs[3], + env->regs[4], + env->regs[5], + 0, 0); + } + } else { + goto error; + } + } + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_STREX: + if (!do_strex(env)) { + break; + } + /* fall through for segv */ + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + addr = env->exception.vaddress; + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = addr; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_DEBUG: + excp_debug: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + case EXCP_KERNEL_TRAP: + if (do_kernel_trap(env)) + goto error; + break; + default: + error: + fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + abort(); + } + process_pending_signals(env); + } +} + +#else + +/* + * Handle AArch64 store-release exclusive + * + * rs = gets the status result of store exclusive + * rt = is the register that is stored + * rt2 = is the second register store (in STP) + * + */ +static int do_strex_a64(CPUARMState *env) +{ + uint64_t val; + int size; + bool is_pair; + int rc = 1; + int segv = 0; + uint64_t addr; + int rs, rt, rt2; + + start_exclusive(); + /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */ + size = extract32(env->exclusive_info, 0, 2); + is_pair = extract32(env->exclusive_info, 2, 1); + rs = extract32(env->exclusive_info, 4, 5); + rt = extract32(env->exclusive_info, 9, 5); + rt2 = extract32(env->exclusive_info, 14, 5); + + addr = env->exclusive_addr; + + if (addr != env->exclusive_test) { + goto finish; + } + + switch (size) { + case 0: + segv = get_user_u8(val, addr); + break; + case 1: + segv = get_user_u16(val, addr); + break; + case 2: + segv = get_user_u32(val, addr); + break; + case 3: + segv = get_user_u64(val, addr); + break; + default: + abort(); + } + if (segv) { + env->exception.vaddress = addr; + goto error; + } + if (val != env->exclusive_val) { + goto finish; + } + if (is_pair) { + if (size == 2) { + segv = get_user_u32(val, addr + 4); + } else { + segv = get_user_u64(val, addr + 8); + } + if (segv) { + env->exception.vaddress = addr + (size == 2 ? 4 : 8); + goto error; + } + if (val != env->exclusive_high) { + goto finish; + } + } + /* handle the zero register */ + val = rt == 31 ? 0 : env->xregs[rt]; + switch (size) { + case 0: + segv = put_user_u8(val, addr); + break; + case 1: + segv = put_user_u16(val, addr); + break; + case 2: + segv = put_user_u32(val, addr); + break; + case 3: + segv = put_user_u64(val, addr); + break; + } + if (segv) { + goto error; + } + if (is_pair) { + /* handle the zero register */ + val = rt2 == 31 ? 0 : env->xregs[rt2]; + if (size == 2) { + segv = put_user_u32(val, addr + 4); + } else { + segv = put_user_u64(val, addr + 8); + } + if (segv) { + env->exception.vaddress = addr + (size == 2 ? 4 : 8); + goto error; + } + } + rc = 0; +finish: + env->pc += 4; + /* rs == 31 encodes a write to the ZR, thus throwing away + * the status return. 
This is rather silly but valid. + */ + if (rs < 31) { + env->xregs[rs] = rc; + } +error: + /* instruction faulted, PC does not advance */ + /* either way a strex releases any exclusive lock we have */ + env->exclusive_addr = -1; + end_exclusive(); + return segv; +} + +/* AArch64 main loop */ +void cpu_loop(CPUARMState *env) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + int trapnr, sig; + target_siginfo_t info; + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_arm_exec(cs); + cpu_exec_end(cs); + + switch (trapnr) { + case EXCP_SWI: + env->xregs[0] = do_syscall(env, + env->xregs[8], + env->xregs[0], + env->xregs[1], + env->xregs[2], + env->xregs[3], + env->xregs[4], + env->xregs[5], + 0, 0); + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_UDEF: + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPN; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_STREX: + if (!do_strex_a64(env)) { + break; + } + /* fall through for segv */ + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->exception.vaddress; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_DEBUG: + case EXCP_BKPT: + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + break; + default: + fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + abort(); + } + process_pending_signals(env); + /* Exception return on AArch64 always clears the exclusive monitor, + * so any return to running guest code implies this. + * A strex (successful or otherwise) also clears the monitor, so + * we don't need to specialcase EXCP_STREX. 
+ */ + env->exclusive_addr = -1; + } +} +#endif /* ndef TARGET_ABI32 */ + +#endif + +#ifdef TARGET_UNICORE32 + +void cpu_loop(CPUUniCore32State *env) +{ + CPUState *cs = CPU(uc32_env_get_cpu(env)); + int trapnr; + unsigned int n, insn; + target_siginfo_t info; + + for (;;) { + cpu_exec_start(cs); + trapnr = uc32_cpu_exec(cs); + cpu_exec_end(cs); + switch (trapnr) { + case UC32_EXCP_PRIV: + { + /* system call */ + get_user_u32(insn, env->regs[31] - 4); + n = insn & 0xffffff; + + if (n >= UC32_SYSCALL_BASE) { + /* linux syscall */ + n -= UC32_SYSCALL_BASE; + if (n == UC32_SYSCALL_NR_set_tls) { + cpu_set_tls(env, env->regs[0]); + env->regs[0] = 0; + } else { + env->regs[0] = do_syscall(env, + n, + env->regs[0], + env->regs[1], + env->regs[2], + env->regs[3], + env->regs[4], + env->regs[5], + 0, 0); + } + } else { + goto error; + } + } + break; + case UC32_EXCP_DTRAP: + case UC32_EXCP_ITRAP: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->cp0.c4_faultaddr; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + default: + goto error; + } + process_pending_signals(env); + } + +error: + fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + abort(); +} +#endif + +#ifdef TARGET_SPARC +#define SPARC64_STACK_BIAS 2047 + +//#define DEBUG_WIN + +/* WARNING: dealing with register windows _is_ complicated. More info + can be found at http://www.sics.se/~psm/sparcstack.html */ +static inline int get_reg_index(CPUSPARCState *env, int cwp, int index) +{ + index = (index + cwp * 16) % (16 * env->nwindows); + /* wrap handling : if cwp is on the last window, then we use the + registers 'after' the end */ + if (index < 8 && env->cwp == env->nwindows - 1) + index += 16 * env->nwindows; + return index; +} + +/* save the register window 'cwp1' */ +static inline void save_window_offset(CPUSPARCState *env, int cwp1) +{ + unsigned int i; + abi_ulong sp_ptr; + + sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)]; +#ifdef TARGET_SPARC64 + if (sp_ptr & 3) + sp_ptr += SPARC64_STACK_BIAS; +#endif +#if defined(DEBUG_WIN) + printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n", + sp_ptr, cwp1); +#endif + for(i = 0; i < 16; i++) { + /* FIXME - what to do if put_user() fails? 
*/ + put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr); + sp_ptr += sizeof(abi_ulong); + } +} + +static void save_window(CPUSPARCState *env) +{ +#ifndef TARGET_SPARC64 + unsigned int new_wim; + new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) & + ((1LL << env->nwindows) - 1); + save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2)); + env->wim = new_wim; +#else + save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2)); + env->cansave++; + env->canrestore--; +#endif +} + +static void restore_window(CPUSPARCState *env) +{ +#ifndef TARGET_SPARC64 + unsigned int new_wim; +#endif + unsigned int i, cwp1; + abi_ulong sp_ptr; + +#ifndef TARGET_SPARC64 + new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) & + ((1LL << env->nwindows) - 1); +#endif + + /* restore the invalid window */ + cwp1 = cpu_cwp_inc(env, env->cwp + 1); + sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)]; +#ifdef TARGET_SPARC64 + if (sp_ptr & 3) + sp_ptr += SPARC64_STACK_BIAS; +#endif +#if defined(DEBUG_WIN) + printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n", + sp_ptr, cwp1); +#endif + for(i = 0; i < 16; i++) { + /* FIXME - what to do if get_user() fails? */ + get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr); + sp_ptr += sizeof(abi_ulong); + } +#ifdef TARGET_SPARC64 + env->canrestore++; + if (env->cleanwin < env->nwindows - 1) + env->cleanwin++; + env->cansave--; +#else + env->wim = new_wim; +#endif +} + +static void flush_windows(CPUSPARCState *env) +{ + int offset, cwp1; + + offset = 1; + for(;;) { + /* if restore would invoke restore_window(), then we can stop */ + cwp1 = cpu_cwp_inc(env, env->cwp + offset); +#ifndef TARGET_SPARC64 + if (env->wim & (1 << cwp1)) + break; +#else + if (env->canrestore == 0) + break; + env->cansave++; + env->canrestore--; +#endif + save_window_offset(env, cwp1); + offset++; + } + cwp1 = cpu_cwp_inc(env, env->cwp + 1); +#ifndef TARGET_SPARC64 + /* set wim so that restore will reload the registers */ + env->wim = 1 << cwp1; +#endif +#if defined(DEBUG_WIN) + printf("flush_windows: nb=%d\n", offset - 1); +#endif +} + +void cpu_loop (CPUSPARCState *env) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + int trapnr; + abi_long ret; + target_siginfo_t info; + + while (1) { + cpu_exec_start(cs); + trapnr = cpu_sparc_exec(cs); + cpu_exec_end(cs); + + /* Compute PSR before exposing state. 
*/ + if (env->cc_op != CC_OP_FLAGS) { + cpu_get_psr(env); + } + + switch (trapnr) { +#ifndef TARGET_SPARC64 + case 0x88: + case 0x90: +#else + case 0x110: + case 0x16d: +#endif + ret = do_syscall (env, env->gregs[1], + env->regwptr[0], env->regwptr[1], + env->regwptr[2], env->regwptr[3], + env->regwptr[4], env->regwptr[5], + 0, 0); + if ((abi_ulong)ret >= (abi_ulong)(-515)) { +#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) + env->xcc |= PSR_CARRY; +#else + env->psr |= PSR_CARRY; +#endif + ret = -ret; + } else { +#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) + env->xcc &= ~PSR_CARRY; +#else + env->psr &= ~PSR_CARRY; +#endif + } + env->regwptr[0] = ret; + /* next instruction */ + env->pc = env->npc; + env->npc = env->npc + 4; + break; + case 0x83: /* flush windows */ +#ifdef TARGET_ABI32 + case 0x103: +#endif + flush_windows(env); + /* next instruction */ + env->pc = env->npc; + env->npc = env->npc + 4; + break; +#ifndef TARGET_SPARC64 + case TT_WIN_OVF: /* window overflow */ + save_window(env); + break; + case TT_WIN_UNF: /* window underflow */ + restore_window(env); + break; + case TT_TFAULT: + case TT_DFAULT: + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->mmuregs[4]; + queue_signal(env, info.si_signo, &info); + } + break; +#else + case TT_SPILL: /* window overflow */ + save_window(env); + break; + case TT_FILL: /* window underflow */ + restore_window(env); + break; + case TT_TFAULT: + case TT_DFAULT: + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + if (trapnr == TT_DFAULT) + info._sifields._sigfault._addr = env->dmmuregs[4]; + else + info._sifields._sigfault._addr = cpu_tsptr(env)->tpc; + queue_signal(env, info.si_signo, &info); + } + break; +#ifndef TARGET_ABI32 + case 0x16e: + flush_windows(env); + sparc64_get_context(env); + break; + case 0x16f: + flush_windows(env); + sparc64_set_context(env); + break; +#endif +#endif + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case TT_ILL_INSN: + { + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPC; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + default: + printf ("Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + exit (1); + } + process_pending_signals (env); + } +} + +#endif + +#ifdef TARGET_PPC +static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env) +{ + return cpu_get_real_ticks(); +} + +uint64_t cpu_ppc_load_tbl(CPUPPCState *env) +{ + return cpu_ppc_get_tb(env); +} + +uint32_t cpu_ppc_load_tbu(CPUPPCState *env) +{ + return cpu_ppc_get_tb(env) >> 32; +} + +uint64_t cpu_ppc_load_atbl(CPUPPCState *env) +{ + return cpu_ppc_get_tb(env); +} + +uint32_t cpu_ppc_load_atbu(CPUPPCState *env) +{ + return cpu_ppc_get_tb(env) >> 32; +} + +uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env) +__attribute__ (( alias ("cpu_ppc_load_tbu") )); + +uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env) +{ + return cpu_ppc_load_tbl(env) & 0x3FFFFF80; +} + +/* XXX: to be fixed */ +int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) +{ + return -1; +} + 
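The PPC timebase helpers earlier in this hunk (cpu_ppc_load_tbl/cpu_ppc_load_tbu) expose one 64-bit counter as two 32-bit halves. A 32-bit reader normally samples the halves with the usual upper/lower/upper retry sequence so the pair stays consistent; below is a minimal sketch of that pattern, assuming only the helper declarations above (read_tb64 itself is a hypothetical illustration, not part of this patch):

/* Illustrative only: assemble a consistent 64-bit timebase from the two
 * 32-bit halves returned by the helpers above. Re-read the upper half
 * until it is unchanged across the read of the lower half (the
 * mftbu/mftb/mftbu idiom used on real hardware). */
static uint64_t read_tb64(CPUPPCState *env)
{
    uint32_t hi, lo, hi2;

    do {
        hi  = cpu_ppc_load_tbu(env);
        lo  = (uint32_t)cpu_ppc_load_tbl(env);
        hi2 = cpu_ppc_load_tbu(env);
    } while (hi != hi2);

    return ((uint64_t)hi << 32) | lo;
}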
+int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) +{ + return -1; +} + +#define EXCP_DUMP(env, fmt, ...) \ +do { \ + CPUState *cs = ENV_GET_CPU(env); \ + fprintf(stderr, fmt , ## __VA_ARGS__); \ + cpu_dump_state(cs, stderr, fprintf, 0); \ + qemu_log(fmt, ## __VA_ARGS__); \ + if (qemu_log_enabled()) { \ + log_cpu_state(cs, 0); \ + } \ +} while (0) + +static int do_store_exclusive(CPUPPCState *env) +{ + target_ulong addr; + target_ulong page_addr; + target_ulong val, val2 __attribute__((unused)) = 0; + int flags; + int segv = 0; + + addr = env->reserve_ea; + page_addr = addr & TARGET_PAGE_MASK; + start_exclusive(); + mmap_lock(); + flags = page_get_flags(page_addr); + if ((flags & PAGE_READ) == 0) { + segv = 1; + } else { + int reg = env->reserve_info & 0x1f; + int size = env->reserve_info >> 5; + int stored = 0; + + if (addr == env->reserve_addr) { + switch (size) { + case 1: segv = get_user_u8(val, addr); break; + case 2: segv = get_user_u16(val, addr); break; + case 4: segv = get_user_u32(val, addr); break; +#if defined(TARGET_PPC64) + case 8: segv = get_user_u64(val, addr); break; + case 16: { + segv = get_user_u64(val, addr); + if (!segv) { + segv = get_user_u64(val2, addr + 8); + } + break; + } +#endif + default: abort(); + } + if (!segv && val == env->reserve_val) { + val = env->gpr[reg]; + switch (size) { + case 1: segv = put_user_u8(val, addr); break; + case 2: segv = put_user_u16(val, addr); break; + case 4: segv = put_user_u32(val, addr); break; +#if defined(TARGET_PPC64) + case 8: segv = put_user_u64(val, addr); break; + case 16: { + if (val2 == env->reserve_val2) { + if (msr_le) { + val2 = val; + val = env->gpr[reg+1]; + } else { + val2 = env->gpr[reg+1]; + } + segv = put_user_u64(val, addr); + if (!segv) { + segv = put_user_u64(val2, addr + 8); + } + } + break; + } +#endif + default: abort(); + } + if (!segv) { + stored = 1; + } + } + } + env->crf[0] = (stored << 1) | xer_so; + env->reserve_addr = (target_ulong)-1; + } + if (!segv) { + env->nip += 4; + } + mmap_unlock(); + end_exclusive(); + return segv; +} + +void cpu_loop(CPUPPCState *env) +{ + CPUState *cs = CPU(ppc_env_get_cpu(env)); + target_siginfo_t info; + int trapnr; + target_ulong ret; + + for(;;) { + cpu_exec_start(cs); + trapnr = cpu_ppc_exec(cs); + cpu_exec_end(cs); + switch(trapnr) { + case POWERPC_EXCP_NONE: + /* Just go on */ + break; + case POWERPC_EXCP_CRITICAL: /* Critical input */ + cpu_abort(cs, "Critical interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + cpu_abort(cs, "Machine check exception while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n", + env->spr[SPR_DAR]); + /* XXX: check this. Seems bugged */ + switch (env->error_code & 0xFF000000) { + case 0x40000000: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + break; + case 0x04000000: + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLADR; + break; + case 0x08000000: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_ACCERR; + break; + default: + /* Let's send a regular segfault... 
*/ + EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", + env->error_code); + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + break; + } + info._sifields._sigfault._addr = env->nip; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx + "\n", env->spr[SPR_SRR0]); + /* XXX: check this */ + switch (env->error_code & 0xFF000000) { + case 0x40000000: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + break; + case 0x10000000: + case 0x08000000: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_ACCERR; + break; + default: + /* Let's send a regular segfault... */ + EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", + env->error_code); + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + break; + } + info._sifields._sigfault._addr = env->nip - 4; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + cpu_abort(cs, "External interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + EXCP_DUMP(env, "Unaligned memory access\n"); + /* XXX: check this */ + info.si_signo = TARGET_SIGBUS; + info.si_errno = 0; + info.si_code = TARGET_BUS_ADRALN; + info._sifields._sigfault._addr = env->nip - 4; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + /* XXX: check this */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + EXCP_DUMP(env, "Floating point program exception\n"); + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + switch (env->error_code & 0xF) { + case POWERPC_EXCP_FP_OX: + info.si_code = TARGET_FPE_FLTOVF; + break; + case POWERPC_EXCP_FP_UX: + info.si_code = TARGET_FPE_FLTUND; + break; + case POWERPC_EXCP_FP_ZX: + case POWERPC_EXCP_FP_VXZDZ: + info.si_code = TARGET_FPE_FLTDIV; + break; + case POWERPC_EXCP_FP_XX: + info.si_code = TARGET_FPE_FLTRES; + break; + case POWERPC_EXCP_FP_VXSOFT: + info.si_code = TARGET_FPE_FLTINV; + break; + case POWERPC_EXCP_FP_VXSNAN: + case POWERPC_EXCP_FP_VXISI: + case POWERPC_EXCP_FP_VXIDI: + case POWERPC_EXCP_FP_VXIMZ: + case POWERPC_EXCP_FP_VXVC: + case POWERPC_EXCP_FP_VXSQRT: + case POWERPC_EXCP_FP_VXCVI: + info.si_code = TARGET_FPE_FLTSUB; + break; + default: + EXCP_DUMP(env, "Unknown floating point exception (%02x)\n", + env->error_code); + break; + } + break; + case POWERPC_EXCP_INVAL: + EXCP_DUMP(env, "Invalid instruction\n"); + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + switch (env->error_code & 0xF) { + case POWERPC_EXCP_INVAL_INVAL: + info.si_code = TARGET_ILL_ILLOPC; + break; + case POWERPC_EXCP_INVAL_LSWX: + info.si_code = TARGET_ILL_ILLOPN; + break; + case POWERPC_EXCP_INVAL_SPR: + info.si_code = TARGET_ILL_PRVREG; + break; + case POWERPC_EXCP_INVAL_FP: + info.si_code = TARGET_ILL_COPROC; + break; + default: + EXCP_DUMP(env, "Unknown invalid operation (%02x)\n", + env->error_code & 0xF); + info.si_code = TARGET_ILL_ILLADR; + break; + } + break; + case POWERPC_EXCP_PRIV: + EXCP_DUMP(env, "Privilege violation\n"); + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + switch (env->error_code & 0xF) { + case POWERPC_EXCP_PRIV_OPC: + info.si_code = TARGET_ILL_PRVOPC; + break; + case POWERPC_EXCP_PRIV_REG: + info.si_code = TARGET_ILL_PRVREG; + break; + default: + EXCP_DUMP(env, "Unknown privilege 
violation (%02x)\n", + env->error_code & 0xF); + info.si_code = TARGET_ILL_PRVOPC; + break; + } + break; + case POWERPC_EXCP_TRAP: + cpu_abort(cs, "Tried to call a TRAP\n"); + break; + default: + /* Should not happen ! */ + cpu_abort(cs, "Unknown program exception (%02x)\n", + env->error_code); + break; + } + info._sifields._sigfault._addr = env->nip - 4; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ + EXCP_DUMP(env, "No floating point allowed\n"); + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_COPROC; + info._sifields._sigfault._addr = env->nip - 4; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + cpu_abort(cs, "Syscall exception while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ + EXCP_DUMP(env, "No APU instruction allowed\n"); + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_COPROC; + info._sifields._sigfault._addr = env->nip - 4; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_DECR: /* Decrementer exception */ + cpu_abort(cs, "Decrementer interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ + cpu_abort(cs, "Fix interval timer interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ + cpu_abort(cs, "Watchdog timer interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_DTLB: /* Data TLB error */ + cpu_abort(cs, "Data TLB exception while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_ITLB: /* Instruction TLB error */ + cpu_abort(cs, "Instruction TLB exception while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ + EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n"); + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_COPROC; + info._sifields._sigfault._addr = env->nip - 4; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */ + cpu_abort(cs, "Embedded floating-point data IRQ not handled\n"); + break; + case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */ + cpu_abort(cs, "Embedded floating-point round IRQ not handled\n"); + break; + case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */ + cpu_abort(cs, "Performance monitor exception not handled\n"); + break; + case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ + cpu_abort(cs, "Doorbell interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ + cpu_abort(cs, "Doorbell critical interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_RESET: /* System reset exception */ + cpu_abort(cs, "Reset interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_DSEG: /* Data segment exception */ + cpu_abort(cs, "Data segment exception while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_ISEG: /* Instruction segment exception */ + cpu_abort(cs, "Instruction segment exception " + "while in user mode. 
Aborting\n"); + break; + /* PowerPC 64 with hypervisor mode support */ + case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ + cpu_abort(cs, "Hypervisor decrementer interrupt " + "while in user mode. Aborting\n"); + break; + case POWERPC_EXCP_TRACE: /* Trace exception */ + /* Nothing to do: + * we use this exception to emulate step-by-step execution mode. + */ + break; + /* PowerPC 64 with hypervisor mode support */ + case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ + cpu_abort(cs, "Hypervisor data storage exception " + "while in user mode. Aborting\n"); + break; + case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */ + cpu_abort(cs, "Hypervisor instruction storage exception " + "while in user mode. Aborting\n"); + break; + case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ + cpu_abort(cs, "Hypervisor data segment exception " + "while in user mode. Aborting\n"); + break; + case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */ + cpu_abort(cs, "Hypervisor instruction segment exception " + "while in user mode. Aborting\n"); + break; + case POWERPC_EXCP_VPU: /* Vector unavailable exception */ + EXCP_DUMP(env, "No Altivec instructions allowed\n"); + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_COPROC; + info._sifields._sigfault._addr = env->nip - 4; + queue_signal(env, info.si_signo, &info); + break; + case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */ + cpu_abort(cs, "Programmable interval timer interrupt " + "while in user mode. Aborting\n"); + break; + case POWERPC_EXCP_IO: /* IO error exception */ + cpu_abort(cs, "IO error exception while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_RUNM: /* Run mode exception */ + cpu_abort(cs, "Run mode exception while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_EMUL: /* Emulation trap exception */ + cpu_abort(cs, "Emulation trap exception not handled\n"); + break; + case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ + cpu_abort(cs, "Instruction fetch TLB exception " + "while in user-mode. Aborting"); + break; + case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ + cpu_abort(cs, "Data load TLB exception while in user-mode. " + "Aborting"); + break; + case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ + cpu_abort(cs, "Data store TLB exception while in user-mode. " + "Aborting"); + break; + case POWERPC_EXCP_FPA: /* Floating-point assist exception */ + cpu_abort(cs, "Floating-point assist exception not handled\n"); + break; + case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ + cpu_abort(cs, "Instruction address breakpoint exception " + "not handled\n"); + break; + case POWERPC_EXCP_SMI: /* System management interrupt */ + cpu_abort(cs, "System management interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_THERM: /* Thermal interrupt */ + cpu_abort(cs, "Thermal interrupt interrupt while in user mode. " + "Aborting\n"); + break; + case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */ + cpu_abort(cs, "Performance monitor exception not handled\n"); + break; + case POWERPC_EXCP_VPUA: /* Vector assist exception */ + cpu_abort(cs, "Vector assist exception not handled\n"); + break; + case POWERPC_EXCP_SOFTP: /* Soft patch exception */ + cpu_abort(cs, "Soft patch exception not handled\n"); + break; + case POWERPC_EXCP_MAINT: /* Maintenance exception */ + cpu_abort(cs, "Maintenance exception while in user mode. 
" + "Aborting\n"); + break; + case POWERPC_EXCP_STOP: /* stop translation */ + /* We did invalidate the instruction cache. Go on */ + break; + case POWERPC_EXCP_BRANCH: /* branch instruction: */ + /* We just stopped because of a branch. Go on */ + break; + case POWERPC_EXCP_SYSCALL_USER: + /* system call in user-mode emulation */ + /* WARNING: + * PPC ABI uses overflow flag in cr0 to signal an error + * in syscalls. + */ + env->crf[0] &= ~0x1; + ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4], + env->gpr[5], env->gpr[6], env->gpr[7], + env->gpr[8], 0, 0); + if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) { + /* Returning from a successful sigreturn syscall. + Avoid corrupting register state. */ + break; + } + if (ret > (target_ulong)(-515)) { + env->crf[0] |= 0x1; + ret = -ret; + } + env->gpr[3] = ret; + break; + case POWERPC_EXCP_STCX: + if (do_store_exclusive(env)) { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->nip; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + default: + cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr); + break; + } + process_pending_signals(env); + } +} +#endif + +#ifdef TARGET_MIPS + +# ifdef TARGET_ABI_MIPSO32 +# define MIPS_SYS(name, args) args, +static const uint8_t mips_syscall_args[] = { + MIPS_SYS(sys_syscall , 8) /* 4000 */ + MIPS_SYS(sys_exit , 1) + MIPS_SYS(sys_fork , 0) + MIPS_SYS(sys_read , 3) + MIPS_SYS(sys_write , 3) + MIPS_SYS(sys_open , 3) /* 4005 */ + MIPS_SYS(sys_close , 1) + MIPS_SYS(sys_waitpid , 3) + MIPS_SYS(sys_creat , 2) + MIPS_SYS(sys_link , 2) + MIPS_SYS(sys_unlink , 1) /* 4010 */ + MIPS_SYS(sys_execve , 0) + MIPS_SYS(sys_chdir , 1) + MIPS_SYS(sys_time , 1) + MIPS_SYS(sys_mknod , 3) + MIPS_SYS(sys_chmod , 2) /* 4015 */ + MIPS_SYS(sys_lchown , 3) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_ni_syscall , 0) /* was sys_stat */ + MIPS_SYS(sys_lseek , 3) + MIPS_SYS(sys_getpid , 0) /* 4020 */ + MIPS_SYS(sys_mount , 5) + MIPS_SYS(sys_umount , 1) + MIPS_SYS(sys_setuid , 1) + MIPS_SYS(sys_getuid , 0) + MIPS_SYS(sys_stime , 1) /* 4025 */ + MIPS_SYS(sys_ptrace , 4) + MIPS_SYS(sys_alarm , 1) + MIPS_SYS(sys_ni_syscall , 0) /* was sys_fstat */ + MIPS_SYS(sys_pause , 0) + MIPS_SYS(sys_utime , 2) /* 4030 */ + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_access , 2) + MIPS_SYS(sys_nice , 1) + MIPS_SYS(sys_ni_syscall , 0) /* 4035 */ + MIPS_SYS(sys_sync , 0) + MIPS_SYS(sys_kill , 2) + MIPS_SYS(sys_rename , 2) + MIPS_SYS(sys_mkdir , 2) + MIPS_SYS(sys_rmdir , 1) /* 4040 */ + MIPS_SYS(sys_dup , 1) + MIPS_SYS(sys_pipe , 0) + MIPS_SYS(sys_times , 1) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_brk , 1) /* 4045 */ + MIPS_SYS(sys_setgid , 1) + MIPS_SYS(sys_getgid , 0) + MIPS_SYS(sys_ni_syscall , 0) /* was signal(2) */ + MIPS_SYS(sys_geteuid , 0) + MIPS_SYS(sys_getegid , 0) /* 4050 */ + MIPS_SYS(sys_acct , 0) + MIPS_SYS(sys_umount2 , 2) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_ioctl , 3) + MIPS_SYS(sys_fcntl , 3) /* 4055 */ + MIPS_SYS(sys_ni_syscall , 2) + MIPS_SYS(sys_setpgid , 2) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_olduname , 1) + MIPS_SYS(sys_umask , 1) /* 4060 */ + 
MIPS_SYS(sys_chroot , 1) + MIPS_SYS(sys_ustat , 2) + MIPS_SYS(sys_dup2 , 2) + MIPS_SYS(sys_getppid , 0) + MIPS_SYS(sys_getpgrp , 0) /* 4065 */ + MIPS_SYS(sys_setsid , 0) + MIPS_SYS(sys_sigaction , 3) + MIPS_SYS(sys_sgetmask , 0) + MIPS_SYS(sys_ssetmask , 1) + MIPS_SYS(sys_setreuid , 2) /* 4070 */ + MIPS_SYS(sys_setregid , 2) + MIPS_SYS(sys_sigsuspend , 0) + MIPS_SYS(sys_sigpending , 1) + MIPS_SYS(sys_sethostname , 2) + MIPS_SYS(sys_setrlimit , 2) /* 4075 */ + MIPS_SYS(sys_getrlimit , 2) + MIPS_SYS(sys_getrusage , 2) + MIPS_SYS(sys_gettimeofday, 2) + MIPS_SYS(sys_settimeofday, 2) + MIPS_SYS(sys_getgroups , 2) /* 4080 */ + MIPS_SYS(sys_setgroups , 2) + MIPS_SYS(sys_ni_syscall , 0) /* old_select */ + MIPS_SYS(sys_symlink , 2) + MIPS_SYS(sys_ni_syscall , 0) /* was sys_lstat */ + MIPS_SYS(sys_readlink , 3) /* 4085 */ + MIPS_SYS(sys_uselib , 1) + MIPS_SYS(sys_swapon , 2) + MIPS_SYS(sys_reboot , 3) + MIPS_SYS(old_readdir , 3) + MIPS_SYS(old_mmap , 6) /* 4090 */ + MIPS_SYS(sys_munmap , 2) + MIPS_SYS(sys_truncate , 2) + MIPS_SYS(sys_ftruncate , 2) + MIPS_SYS(sys_fchmod , 2) + MIPS_SYS(sys_fchown , 3) /* 4095 */ + MIPS_SYS(sys_getpriority , 2) + MIPS_SYS(sys_setpriority , 3) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_statfs , 2) + MIPS_SYS(sys_fstatfs , 2) /* 4100 */ + MIPS_SYS(sys_ni_syscall , 0) /* was ioperm(2) */ + MIPS_SYS(sys_socketcall , 2) + MIPS_SYS(sys_syslog , 3) + MIPS_SYS(sys_setitimer , 3) + MIPS_SYS(sys_getitimer , 2) /* 4105 */ + MIPS_SYS(sys_newstat , 2) + MIPS_SYS(sys_newlstat , 2) + MIPS_SYS(sys_newfstat , 2) + MIPS_SYS(sys_uname , 1) + MIPS_SYS(sys_ni_syscall , 0) /* 4110 was iopl(2) */ + MIPS_SYS(sys_vhangup , 0) + MIPS_SYS(sys_ni_syscall , 0) /* was sys_idle() */ + MIPS_SYS(sys_ni_syscall , 0) /* was sys_vm86 */ + MIPS_SYS(sys_wait4 , 4) + MIPS_SYS(sys_swapoff , 1) /* 4115 */ + MIPS_SYS(sys_sysinfo , 1) + MIPS_SYS(sys_ipc , 6) + MIPS_SYS(sys_fsync , 1) + MIPS_SYS(sys_sigreturn , 0) + MIPS_SYS(sys_clone , 6) /* 4120 */ + MIPS_SYS(sys_setdomainname, 2) + MIPS_SYS(sys_newuname , 1) + MIPS_SYS(sys_ni_syscall , 0) /* sys_modify_ldt */ + MIPS_SYS(sys_adjtimex , 1) + MIPS_SYS(sys_mprotect , 3) /* 4125 */ + MIPS_SYS(sys_sigprocmask , 3) + MIPS_SYS(sys_ni_syscall , 0) /* was create_module */ + MIPS_SYS(sys_init_module , 5) + MIPS_SYS(sys_delete_module, 1) + MIPS_SYS(sys_ni_syscall , 0) /* 4130 was get_kernel_syms */ + MIPS_SYS(sys_quotactl , 0) + MIPS_SYS(sys_getpgid , 1) + MIPS_SYS(sys_fchdir , 1) + MIPS_SYS(sys_bdflush , 2) + MIPS_SYS(sys_sysfs , 3) /* 4135 */ + MIPS_SYS(sys_personality , 1) + MIPS_SYS(sys_ni_syscall , 0) /* for afs_syscall */ + MIPS_SYS(sys_setfsuid , 1) + MIPS_SYS(sys_setfsgid , 1) + MIPS_SYS(sys_llseek , 5) /* 4140 */ + MIPS_SYS(sys_getdents , 3) + MIPS_SYS(sys_select , 5) + MIPS_SYS(sys_flock , 2) + MIPS_SYS(sys_msync , 3) + MIPS_SYS(sys_readv , 3) /* 4145 */ + MIPS_SYS(sys_writev , 3) + MIPS_SYS(sys_cacheflush , 3) + MIPS_SYS(sys_cachectl , 3) + MIPS_SYS(sys_sysmips , 4) + MIPS_SYS(sys_ni_syscall , 0) /* 4150 */ + MIPS_SYS(sys_getsid , 1) + MIPS_SYS(sys_fdatasync , 0) + MIPS_SYS(sys_sysctl , 1) + MIPS_SYS(sys_mlock , 2) + MIPS_SYS(sys_munlock , 2) /* 4155 */ + MIPS_SYS(sys_mlockall , 1) + MIPS_SYS(sys_munlockall , 0) + MIPS_SYS(sys_sched_setparam, 2) + MIPS_SYS(sys_sched_getparam, 2) + MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */ + MIPS_SYS(sys_sched_getscheduler, 1) + MIPS_SYS(sys_sched_yield , 0) + MIPS_SYS(sys_sched_get_priority_max, 1) + MIPS_SYS(sys_sched_get_priority_min, 1) + MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */ + MIPS_SYS(sys_nanosleep, 
2) + MIPS_SYS(sys_mremap , 5) + MIPS_SYS(sys_accept , 3) + MIPS_SYS(sys_bind , 3) + MIPS_SYS(sys_connect , 3) /* 4170 */ + MIPS_SYS(sys_getpeername , 3) + MIPS_SYS(sys_getsockname , 3) + MIPS_SYS(sys_getsockopt , 5) + MIPS_SYS(sys_listen , 2) + MIPS_SYS(sys_recv , 4) /* 4175 */ + MIPS_SYS(sys_recvfrom , 6) + MIPS_SYS(sys_recvmsg , 3) + MIPS_SYS(sys_send , 4) + MIPS_SYS(sys_sendmsg , 3) + MIPS_SYS(sys_sendto , 6) /* 4180 */ + MIPS_SYS(sys_setsockopt , 5) + MIPS_SYS(sys_shutdown , 2) + MIPS_SYS(sys_socket , 3) + MIPS_SYS(sys_socketpair , 4) + MIPS_SYS(sys_setresuid , 3) /* 4185 */ + MIPS_SYS(sys_getresuid , 3) + MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */ + MIPS_SYS(sys_poll , 3) + MIPS_SYS(sys_nfsservctl , 3) + MIPS_SYS(sys_setresgid , 3) /* 4190 */ + MIPS_SYS(sys_getresgid , 3) + MIPS_SYS(sys_prctl , 5) + MIPS_SYS(sys_rt_sigreturn, 0) + MIPS_SYS(sys_rt_sigaction, 4) + MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */ + MIPS_SYS(sys_rt_sigpending, 2) + MIPS_SYS(sys_rt_sigtimedwait, 4) + MIPS_SYS(sys_rt_sigqueueinfo, 3) + MIPS_SYS(sys_rt_sigsuspend, 0) + MIPS_SYS(sys_pread64 , 6) /* 4200 */ + MIPS_SYS(sys_pwrite64 , 6) + MIPS_SYS(sys_chown , 3) + MIPS_SYS(sys_getcwd , 2) + MIPS_SYS(sys_capget , 2) + MIPS_SYS(sys_capset , 2) /* 4205 */ + MIPS_SYS(sys_sigaltstack , 2) + MIPS_SYS(sys_sendfile , 4) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_mmap2 , 6) /* 4210 */ + MIPS_SYS(sys_truncate64 , 4) + MIPS_SYS(sys_ftruncate64 , 4) + MIPS_SYS(sys_stat64 , 2) + MIPS_SYS(sys_lstat64 , 2) + MIPS_SYS(sys_fstat64 , 2) /* 4215 */ + MIPS_SYS(sys_pivot_root , 2) + MIPS_SYS(sys_mincore , 3) + MIPS_SYS(sys_madvise , 3) + MIPS_SYS(sys_getdents64 , 3) + MIPS_SYS(sys_fcntl64 , 3) /* 4220 */ + MIPS_SYS(sys_ni_syscall , 0) + MIPS_SYS(sys_gettid , 0) + MIPS_SYS(sys_readahead , 5) + MIPS_SYS(sys_setxattr , 5) + MIPS_SYS(sys_lsetxattr , 5) /* 4225 */ + MIPS_SYS(sys_fsetxattr , 5) + MIPS_SYS(sys_getxattr , 4) + MIPS_SYS(sys_lgetxattr , 4) + MIPS_SYS(sys_fgetxattr , 4) + MIPS_SYS(sys_listxattr , 3) /* 4230 */ + MIPS_SYS(sys_llistxattr , 3) + MIPS_SYS(sys_flistxattr , 3) + MIPS_SYS(sys_removexattr , 2) + MIPS_SYS(sys_lremovexattr, 2) + MIPS_SYS(sys_fremovexattr, 2) /* 4235 */ + MIPS_SYS(sys_tkill , 2) + MIPS_SYS(sys_sendfile64 , 5) + MIPS_SYS(sys_futex , 6) + MIPS_SYS(sys_sched_setaffinity, 3) + MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */ + MIPS_SYS(sys_io_setup , 2) + MIPS_SYS(sys_io_destroy , 1) + MIPS_SYS(sys_io_getevents, 5) + MIPS_SYS(sys_io_submit , 3) + MIPS_SYS(sys_io_cancel , 3) /* 4245 */ + MIPS_SYS(sys_exit_group , 1) + MIPS_SYS(sys_lookup_dcookie, 3) + MIPS_SYS(sys_epoll_create, 1) + MIPS_SYS(sys_epoll_ctl , 4) + MIPS_SYS(sys_epoll_wait , 3) /* 4250 */ + MIPS_SYS(sys_remap_file_pages, 5) + MIPS_SYS(sys_set_tid_address, 1) + MIPS_SYS(sys_restart_syscall, 0) + MIPS_SYS(sys_fadvise64_64, 7) + MIPS_SYS(sys_statfs64 , 3) /* 4255 */ + MIPS_SYS(sys_fstatfs64 , 2) + MIPS_SYS(sys_timer_create, 3) + MIPS_SYS(sys_timer_settime, 4) + MIPS_SYS(sys_timer_gettime, 2) + MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */ + MIPS_SYS(sys_timer_delete, 1) + MIPS_SYS(sys_clock_settime, 2) + MIPS_SYS(sys_clock_gettime, 2) + MIPS_SYS(sys_clock_getres, 2) + MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */ + MIPS_SYS(sys_tgkill , 3) + MIPS_SYS(sys_utimes , 2) + MIPS_SYS(sys_mbind , 4) + MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */ + MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */ + MIPS_SYS(sys_mq_open , 4) + MIPS_SYS(sys_mq_unlink , 1) + MIPS_SYS(sys_mq_timedsend, 5) + 
MIPS_SYS(sys_mq_timedreceive, 5) + MIPS_SYS(sys_mq_notify , 2) /* 4275 */ + MIPS_SYS(sys_mq_getsetattr, 3) + MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */ + MIPS_SYS(sys_waitid , 4) + MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */ + MIPS_SYS(sys_add_key , 5) + MIPS_SYS(sys_request_key, 4) + MIPS_SYS(sys_keyctl , 5) + MIPS_SYS(sys_set_thread_area, 1) + MIPS_SYS(sys_inotify_init, 0) + MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */ + MIPS_SYS(sys_inotify_rm_watch, 2) + MIPS_SYS(sys_migrate_pages, 4) + MIPS_SYS(sys_openat, 4) + MIPS_SYS(sys_mkdirat, 3) + MIPS_SYS(sys_mknodat, 4) /* 4290 */ + MIPS_SYS(sys_fchownat, 5) + MIPS_SYS(sys_futimesat, 3) + MIPS_SYS(sys_fstatat64, 4) + MIPS_SYS(sys_unlinkat, 3) + MIPS_SYS(sys_renameat, 4) /* 4295 */ + MIPS_SYS(sys_linkat, 5) + MIPS_SYS(sys_symlinkat, 3) + MIPS_SYS(sys_readlinkat, 4) + MIPS_SYS(sys_fchmodat, 3) + MIPS_SYS(sys_faccessat, 3) /* 4300 */ + MIPS_SYS(sys_pselect6, 6) + MIPS_SYS(sys_ppoll, 5) + MIPS_SYS(sys_unshare, 1) + MIPS_SYS(sys_splice, 6) + MIPS_SYS(sys_sync_file_range, 7) /* 4305 */ + MIPS_SYS(sys_tee, 4) + MIPS_SYS(sys_vmsplice, 4) + MIPS_SYS(sys_move_pages, 6) + MIPS_SYS(sys_set_robust_list, 2) + MIPS_SYS(sys_get_robust_list, 3) /* 4310 */ + MIPS_SYS(sys_kexec_load, 4) + MIPS_SYS(sys_getcpu, 3) + MIPS_SYS(sys_epoll_pwait, 6) + MIPS_SYS(sys_ioprio_set, 3) + MIPS_SYS(sys_ioprio_get, 2) + MIPS_SYS(sys_utimensat, 4) + MIPS_SYS(sys_signalfd, 3) + MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */ + MIPS_SYS(sys_eventfd, 1) + MIPS_SYS(sys_fallocate, 6) /* 4320 */ + MIPS_SYS(sys_timerfd_create, 2) + MIPS_SYS(sys_timerfd_gettime, 2) + MIPS_SYS(sys_timerfd_settime, 4) + MIPS_SYS(sys_signalfd4, 4) + MIPS_SYS(sys_eventfd2, 2) /* 4325 */ + MIPS_SYS(sys_epoll_create1, 1) + MIPS_SYS(sys_dup3, 3) + MIPS_SYS(sys_pipe2, 2) + MIPS_SYS(sys_inotify_init1, 1) + MIPS_SYS(sys_preadv, 6) /* 4330 */ + MIPS_SYS(sys_pwritev, 6) + MIPS_SYS(sys_rt_tgsigqueueinfo, 4) + MIPS_SYS(sys_perf_event_open, 5) + MIPS_SYS(sys_accept4, 4) + MIPS_SYS(sys_recvmmsg, 5) /* 4335 */ + MIPS_SYS(sys_fanotify_init, 2) + MIPS_SYS(sys_fanotify_mark, 6) + MIPS_SYS(sys_prlimit64, 4) + MIPS_SYS(sys_name_to_handle_at, 5) + MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */ + MIPS_SYS(sys_clock_adjtime, 2) + MIPS_SYS(sys_syncfs, 1) +}; +# undef MIPS_SYS +# endif /* O32 */ + +static int do_store_exclusive(CPUMIPSState *env) +{ + target_ulong addr; + target_ulong page_addr; + target_ulong val; + int flags; + int segv = 0; + int reg; + int d; + + addr = env->lladdr; + page_addr = addr & TARGET_PAGE_MASK; + start_exclusive(); + mmap_lock(); + flags = page_get_flags(page_addr); + if ((flags & PAGE_READ) == 0) { + segv = 1; + } else { + reg = env->llreg & 0x1f; + d = (env->llreg & 0x20) != 0; + if (d) { + segv = get_user_s64(val, addr); + } else { + segv = get_user_s32(val, addr); + } + if (!segv) { + if (val != env->llval) { + env->active_tc.gpr[reg] = 0; + } else { + if (d) { + segv = put_user_u64(env->llnewval, addr); + } else { + segv = put_user_u32(env->llnewval, addr); + } + if (!segv) { + env->active_tc.gpr[reg] = 1; + } + } + } + } + env->lladdr = -1; + if (!segv) { + env->active_tc.PC += 4; + } + mmap_unlock(); + end_exclusive(); + return segv; +} + +/* Break codes */ +enum { + BRK_OVERFLOW = 6, + BRK_DIVZERO = 7 +}; + +static int do_break(CPUMIPSState *env, target_siginfo_t *info, + unsigned int code) +{ + int ret = -1; + + switch (code) { + case BRK_OVERFLOW: + case BRK_DIVZERO: + info->si_signo = TARGET_SIGFPE; + info->si_errno = 0; + info->si_code = (code == BRK_OVERFLOW) ? 
FPE_INTOVF : FPE_INTDIV; + queue_signal(env, info->si_signo, &*info); + ret = 0; + break; + default: + info->si_signo = TARGET_SIGTRAP; + info->si_errno = 0; + queue_signal(env, info->si_signo, &*info); + ret = 0; + break; + } + + return ret; +} + +void cpu_loop(CPUMIPSState *env) +{ + CPUState *cs = CPU(mips_env_get_cpu(env)); + target_siginfo_t info; + int trapnr; + abi_long ret; +# ifdef TARGET_ABI_MIPSO32 + unsigned int syscall_num; +# endif + + for(;;) { + cpu_exec_start(cs); + trapnr = cpu_mips_exec(cs); + cpu_exec_end(cs); + switch(trapnr) { + case EXCP_SYSCALL: + env->active_tc.PC += 4; +# ifdef TARGET_ABI_MIPSO32 + syscall_num = env->active_tc.gpr[2] - 4000; + if (syscall_num >= sizeof(mips_syscall_args)) { + ret = -TARGET_ENOSYS; + } else { + int nb_args; + abi_ulong sp_reg; + abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0; + + nb_args = mips_syscall_args[syscall_num]; + sp_reg = env->active_tc.gpr[29]; + switch (nb_args) { + /* these arguments are taken from the stack */ + case 8: + if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) { + goto done_syscall; + } + case 7: + if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) { + goto done_syscall; + } + case 6: + if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) { + goto done_syscall; + } + case 5: + if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) { + goto done_syscall; + } + default: + break; + } + ret = do_syscall(env, env->active_tc.gpr[2], + env->active_tc.gpr[4], + env->active_tc.gpr[5], + env->active_tc.gpr[6], + env->active_tc.gpr[7], + arg5, arg6, arg7, arg8); + } +done_syscall: +# else + ret = do_syscall(env, env->active_tc.gpr[2], + env->active_tc.gpr[4], env->active_tc.gpr[5], + env->active_tc.gpr[6], env->active_tc.gpr[7], + env->active_tc.gpr[8], env->active_tc.gpr[9], + env->active_tc.gpr[10], env->active_tc.gpr[11]); +# endif /* O32 */ + if (ret == -TARGET_QEMU_ESIGRETURN) { + /* Returning from a successful sigreturn syscall. + Avoid clobbering register state. */ + break; + } + if ((abi_ulong)ret >= (abi_ulong)-1133) { + env->active_tc.gpr[7] = 1; /* error flag */ + ret = -ret; + } else { + env->active_tc.gpr[7] = 0; /* error flag */ + } + env->active_tc.gpr[2] = ret; + break; + case EXCP_TLBL: + case EXCP_TLBS: + case EXCP_AdEL: + case EXCP_AdES: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->CP0_BadVAddr; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_CpU: + case EXCP_RI: + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = 0; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + case EXCP_SC: + if (do_store_exclusive(env)) { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->active_tc.PC; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_DSPDIS: + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPC; + queue_signal(env, info.si_signo, &info); + break; + /* The code below was inspired by the MIPS Linux kernel trap + * handling code in arch/mips/kernel/traps.c. 
+ */ + case EXCP_BREAK: + { + abi_ulong trap_instr; + unsigned int code; + + if (env->hflags & MIPS_HFLAG_M16) { + if (env->insn_flags & ASE_MICROMIPS) { + /* microMIPS mode */ + ret = get_user_u16(trap_instr, env->active_tc.PC); + if (ret != 0) { + goto error; + } + + if ((trap_instr >> 10) == 0x11) { + /* 16-bit instruction */ + code = trap_instr & 0xf; + } else { + /* 32-bit instruction */ + abi_ulong instr_lo; + + ret = get_user_u16(instr_lo, + env->active_tc.PC + 2); + if (ret != 0) { + goto error; + } + trap_instr = (trap_instr << 16) | instr_lo; + code = ((trap_instr >> 6) & ((1 << 20) - 1)); + /* Unfortunately, microMIPS also suffers from + the old assembler bug... */ + if (code >= (1 << 10)) { + code >>= 10; + } + } + } else { + /* MIPS16e mode */ + ret = get_user_u16(trap_instr, env->active_tc.PC); + if (ret != 0) { + goto error; + } + code = (trap_instr >> 6) & 0x3f; + } + } else { + ret = get_user_u32(trap_instr, env->active_tc.PC); + if (ret != 0) { + goto error; + } + + /* As described in the original Linux kernel code, the + * below checks on 'code' are to work around an old + * assembly bug. + */ + code = ((trap_instr >> 6) & ((1 << 20) - 1)); + if (code >= (1 << 10)) { + code >>= 10; + } + } + + if (do_break(env, &info, code) != 0) { + goto error; + } + } + break; + case EXCP_TRAP: + { + abi_ulong trap_instr; + unsigned int code = 0; + + if (env->hflags & MIPS_HFLAG_M16) { + /* microMIPS mode */ + abi_ulong instr[2]; + + ret = get_user_u16(instr[0], env->active_tc.PC) || + get_user_u16(instr[1], env->active_tc.PC + 2); + + trap_instr = (instr[0] << 16) | instr[1]; + } else { + ret = get_user_u32(trap_instr, env->active_tc.PC); + } + + if (ret != 0) { + goto error; + } + + /* The immediate versions don't provide a code. */ + if (!(trap_instr & 0xFC000000)) { + if (env->hflags & MIPS_HFLAG_M16) { + /* microMIPS mode */ + code = ((trap_instr >> 12) & ((1 << 4) - 1)); + } else { + code = ((trap_instr >> 6) & ((1 << 10) - 1)); + } + } + + if (do_break(env, &info, code) != 0) { + goto error; + } + } + break; + default: +error: + fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + abort(); + } + process_pending_signals(env); + } +} +#endif + +#ifdef TARGET_OPENRISC + +void cpu_loop(CPUOpenRISCState *env) +{ + CPUState *cs = CPU(openrisc_env_get_cpu(env)); + int trapnr, gdbsig; + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_openrisc_exec(cs); + cpu_exec_end(cs); + gdbsig = 0; + + switch (trapnr) { + case EXCP_RESET: + qemu_log("\nReset request, exit, pc is %#x\n", env->pc); + exit(1); + break; + case EXCP_BUSERR: + qemu_log("\nBus error, exit, pc is %#x\n", env->pc); + gdbsig = TARGET_SIGBUS; + break; + case EXCP_DPF: + case EXCP_IPF: + cpu_dump_state(cs, stderr, fprintf, 0); + gdbsig = TARGET_SIGSEGV; + break; + case EXCP_TICK: + qemu_log("\nTick time interrupt pc is %#x\n", env->pc); + break; + case EXCP_ALIGN: + qemu_log("\nAlignment pc is %#x\n", env->pc); + gdbsig = TARGET_SIGBUS; + break; + case EXCP_ILLEGAL: + qemu_log("\nIllegal instructionpc is %#x\n", env->pc); + gdbsig = TARGET_SIGILL; + break; + case EXCP_INT: + qemu_log("\nExternal interruptpc is %#x\n", env->pc); + break; + case EXCP_DTLBMISS: + case EXCP_ITLBMISS: + qemu_log("\nTLB miss\n"); + break; + case EXCP_RANGE: + qemu_log("\nRange\n"); + gdbsig = TARGET_SIGSEGV; + break; + case EXCP_SYSCALL: + env->pc += 4; /* 0xc00; */ + env->gpr[11] = do_syscall(env, + env->gpr[11], /* return value */ + env->gpr[3], /* r3 - r7 are params */ + 
env->gpr[4], + env->gpr[5], + env->gpr[6], + env->gpr[7], + env->gpr[8], 0, 0); + break; + case EXCP_FPE: + qemu_log("\nFloating point error\n"); + break; + case EXCP_TRAP: + qemu_log("\nTrap\n"); + gdbsig = TARGET_SIGTRAP; + break; + case EXCP_NR: + qemu_log("\nNR\n"); + break; + default: + qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n", + trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + gdbsig = TARGET_SIGILL; + break; + } + if (gdbsig) { + gdb_handlesig(cs, gdbsig); + if (gdbsig != TARGET_SIGTRAP) { + exit(1); + } + } + + process_pending_signals(env); + } +} + +#endif /* TARGET_OPENRISC */ + +#ifdef TARGET_SH4 +void cpu_loop(CPUSH4State *env) +{ + CPUState *cs = CPU(sh_env_get_cpu(env)); + int trapnr, ret; + target_siginfo_t info; + + while (1) { + cpu_exec_start(cs); + trapnr = cpu_sh4_exec(cs); + cpu_exec_end(cs); + + switch (trapnr) { + case 0x160: + env->pc += 2; + ret = do_syscall(env, + env->gregs[3], + env->gregs[4], + env->gregs[5], + env->gregs[6], + env->gregs[7], + env->gregs[0], + env->gregs[1], + 0, 0); + env->gregs[0] = ret; + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + case 0xa0: + case 0xc0: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->tea; + queue_signal(env, info.si_signo, &info); + break; + + default: + printf ("Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + exit (1); + } + process_pending_signals (env); + } +} +#endif + +#ifdef TARGET_CRIS +void cpu_loop(CPUCRISState *env) +{ + CPUState *cs = CPU(cris_env_get_cpu(env)); + int trapnr, ret; + target_siginfo_t info; + + while (1) { + cpu_exec_start(cs); + trapnr = cpu_cris_exec(cs); + cpu_exec_end(cs); + switch (trapnr) { + case 0xaa: + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->pregs[PR_EDA]; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_BREAK: + ret = do_syscall(env, + env->regs[9], + env->regs[10], + env->regs[11], + env->regs[12], + env->regs[13], + env->pregs[7], + env->pregs[11], + 0, 0); + env->regs[10] = ret; + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + default: + printf ("Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + exit (1); + } + process_pending_signals (env); + } +} +#endif + +#ifdef TARGET_MICROBLAZE +void cpu_loop(CPUMBState *env) +{ + CPUState *cs = CPU(mb_env_get_cpu(env)); + int trapnr, ret; + target_siginfo_t info; + + while (1) { + cpu_exec_start(cs); + trapnr = cpu_mb_exec(cs); + cpu_exec_end(cs); + switch (trapnr) { + case 0xaa: + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = 0; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled 
asap */ + break; + case EXCP_BREAK: + /* Return address is 4 bytes after the call. */ + env->regs[14] += 4; + env->sregs[SR_PC] = env->regs[14]; + ret = do_syscall(env, + env->regs[12], + env->regs[5], + env->regs[6], + env->regs[7], + env->regs[8], + env->regs[9], + env->regs[10], + 0, 0); + env->regs[3] = ret; + break; + case EXCP_HW_EXCP: + env->regs[17] = env->sregs[SR_PC] + 4; + if (env->iflags & D_FLAG) { + env->sregs[SR_ESR] |= 1 << 12; + env->sregs[SR_PC] -= 4; + /* FIXME: if branch was immed, replay the imm as well. */ + } + + env->iflags &= ~(IMM_FLAG | D_FLAG); + + switch (env->sregs[SR_ESR] & 31) { + case ESR_EC_DIVZERO: + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + info.si_code = TARGET_FPE_FLTDIV; + info._sifields._sigfault._addr = 0; + queue_signal(env, info.si_signo, &info); + break; + case ESR_EC_FPU: + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + if (env->sregs[SR_FSR] & FSR_IO) { + info.si_code = TARGET_FPE_FLTINV; + } + if (env->sregs[SR_FSR] & FSR_DZ) { + info.si_code = TARGET_FPE_FLTDIV; + } + info._sifields._sigfault._addr = 0; + queue_signal(env, info.si_signo, &info); + break; + default: + printf ("Unhandled hw-exception: 0x%x\n", + env->sregs[SR_ESR] & ESR_EC_MASK); + cpu_dump_state(cs, stderr, fprintf, 0); + exit (1); + break; + } + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + default: + printf ("Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + exit (1); + } + process_pending_signals (env); + } +} +#endif + +#ifdef TARGET_M68K + +void cpu_loop(CPUM68KState *env) +{ + CPUState *cs = CPU(m68k_env_get_cpu(env)); + int trapnr; + unsigned int n; + target_siginfo_t info; + TaskState *ts = cs->opaque; + + for(;;) { + cpu_exec_start(cs); + trapnr = cpu_m68k_exec(cs); + cpu_exec_end(cs); + switch(trapnr) { + case EXCP_ILLEGAL: + { + if (ts->sim_syscalls) { + uint16_t nr; + get_user_u16(nr, env->pc + 2); + env->pc += 4; + do_m68k_simcall(env, nr); + } else { + goto do_sigill; + } + } + break; + case EXCP_HALT_INSN: + /* Semihosing syscall. 
*/ + env->pc += 4; + do_m68k_semihosting(env, env->dregs[0]); + break; + case EXCP_LINEA: + case EXCP_LINEF: + case EXCP_UNSUPPORTED: + do_sigill: + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPN; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_TRAP0: + { + ts->sim_syscalls = 0; + n = env->dregs[0]; + env->pc += 2; + env->dregs[0] = do_syscall(env, + n, + env->dregs[1], + env->dregs[2], + env->dregs[3], + env->dregs[4], + env->dregs[5], + env->aregs[0], + 0, 0); + } + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_ACCESS: + { + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->mmu.ar; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) + { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + default: + fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + abort(); + } + process_pending_signals(env); + } +} +#endif /* TARGET_M68K */ + +#ifdef TARGET_ALPHA +static void do_store_exclusive(CPUAlphaState *env, int reg, int quad) +{ + target_ulong addr, val, tmp; + target_siginfo_t info; + int ret = 0; + + addr = env->lock_addr; + tmp = env->lock_st_addr; + env->lock_addr = -1; + env->lock_st_addr = 0; + + start_exclusive(); + mmap_lock(); + + if (addr == tmp) { + if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) { + goto do_sigsegv; + } + + if (val == env->lock_value) { + tmp = env->ir[reg]; + if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) { + goto do_sigsegv; + } + ret = 1; + } + } + env->ir[reg] = ret; + env->pc += 4; + + mmap_unlock(); + end_exclusive(); + return; + + do_sigsegv: + mmap_unlock(); + end_exclusive(); + + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = addr; + queue_signal(env, TARGET_SIGSEGV, &info); +} + +void cpu_loop(CPUAlphaState *env) +{ + CPUState *cs = CPU(alpha_env_get_cpu(env)); + int trapnr; + target_siginfo_t info; + abi_long sysret; + + while (1) { + cpu_exec_start(cs); + trapnr = cpu_alpha_exec(cs); + cpu_exec_end(cs); + + /* All of the traps imply a transition through PALcode, which + implies an REI instruction has been executed. Which means + that the intr_flag should be cleared. */ + env->intr_flag = 0; + + switch (trapnr) { + case EXCP_RESET: + fprintf(stderr, "Reset requested. Exit\n"); + exit(1); + break; + case EXCP_MCHK: + fprintf(stderr, "Machine check exception. Exit\n"); + exit(1); + break; + case EXCP_SMP_INTERRUPT: + case EXCP_CLK_INTERRUPT: + case EXCP_DEV_INTERRUPT: + fprintf(stderr, "External interrupt. Exit\n"); + exit(1); + break; + case EXCP_MMFAULT: + env->lock_addr = -1; + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID + ? 
TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); + info._sifields._sigfault._addr = env->trap_arg0; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_UNALIGN: + env->lock_addr = -1; + info.si_signo = TARGET_SIGBUS; + info.si_errno = 0; + info.si_code = TARGET_BUS_ADRALN; + info._sifields._sigfault._addr = env->trap_arg0; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_OPCDEC: + do_sigill: + env->lock_addr = -1; + info.si_signo = TARGET_SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPC; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_ARITH: + env->lock_addr = -1; + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + info.si_code = TARGET_FPE_FLTINV; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; + case EXCP_FEN: + /* No-op. Linux simply re-enables the FPU. */ + break; + case EXCP_CALL_PAL: + env->lock_addr = -1; + switch (env->error_code) { + case 0x80: + /* BPT */ + info.si_signo = TARGET_SIGTRAP; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; + case 0x81: + /* BUGCHK */ + info.si_signo = TARGET_SIGTRAP; + info.si_errno = 0; + info.si_code = 0; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; + case 0x83: + /* CALLSYS */ + trapnr = env->ir[IR_V0]; + sysret = do_syscall(env, trapnr, + env->ir[IR_A0], env->ir[IR_A1], + env->ir[IR_A2], env->ir[IR_A3], + env->ir[IR_A4], env->ir[IR_A5], + 0, 0); + if (trapnr == TARGET_NR_sigreturn + || trapnr == TARGET_NR_rt_sigreturn) { + break; + } + /* Syscall writes 0 to V0 to bypass error check, similar + to how this is handled internal to Linux kernel. + (Ab)use trapnr temporarily as boolean indicating error. */ + trapnr = (env->ir[IR_V0] != 0 && sysret < 0); + env->ir[IR_V0] = (trapnr ? -sysret : sysret); + env->ir[IR_A3] = trapnr; + break; + case 0x86: + /* IMB */ + /* ??? We can probably elide the code using page_unprotect + that is checking for self-modifying code. Instead we + could simply call tb_flush here. Until we work out the + changes required to turn off the extra write protection, + this can be a no-op. */ + break; + case 0x9E: + /* RDUNIQUE */ + /* Handled in the translator for usermode. */ + abort(); + case 0x9F: + /* WRUNIQUE */ + /* Handled in the translator for usermode. 
*/ + abort(); + case 0xAA: + /* GENTRAP */ + info.si_signo = TARGET_SIGFPE; + switch (env->ir[IR_A0]) { + case TARGET_GEN_INTOVF: + info.si_code = TARGET_FPE_INTOVF; + break; + case TARGET_GEN_INTDIV: + info.si_code = TARGET_FPE_INTDIV; + break; + case TARGET_GEN_FLTOVF: + info.si_code = TARGET_FPE_FLTOVF; + break; + case TARGET_GEN_FLTUND: + info.si_code = TARGET_FPE_FLTUND; + break; + case TARGET_GEN_FLTINV: + info.si_code = TARGET_FPE_FLTINV; + break; + case TARGET_GEN_FLTINE: + info.si_code = TARGET_FPE_FLTRES; + break; + case TARGET_GEN_ROPRAND: + info.si_code = 0; + break; + default: + info.si_signo = TARGET_SIGTRAP; + info.si_code = 0; + break; + } + info.si_errno = 0; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, &info); + break; + default: + goto do_sigill; + } + break; + case EXCP_DEBUG: + info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP); + if (info.si_signo) { + env->lock_addr = -1; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_STL_C: + case EXCP_STQ_C: + do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C); + break; + case EXCP_INTERRUPT: + /* Just indicate that signals should be handled asap. */ + break; + default: + printf ("Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + exit (1); + } + process_pending_signals (env); + } +} +#endif /* TARGET_ALPHA */ + +#ifdef TARGET_S390X +void cpu_loop(CPUS390XState *env) +{ + CPUState *cs = CPU(s390_env_get_cpu(env)); + int trapnr, n, sig; + target_siginfo_t info; + target_ulong addr; + + while (1) { + cpu_exec_start(cs); + trapnr = cpu_s390x_exec(cs); + cpu_exec_end(cs); + switch (trapnr) { + case EXCP_INTERRUPT: + /* Just indicate that signals should be handled asap. */ + break; + + case EXCP_SVC: + n = env->int_svc_code; + if (!n) { + /* syscalls > 255 */ + n = env->regs[1]; + } + env->psw.addr += env->int_svc_ilen; + env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3], + env->regs[4], env->regs[5], + env->regs[6], env->regs[7], 0, 0); + break; + + case EXCP_DEBUG: + sig = gdb_handlesig(cs, TARGET_SIGTRAP); + if (sig) { + n = TARGET_TRAP_BRKPT; + goto do_signal_pc; + } + break; + case EXCP_PGM: + n = env->int_pgm_code; + switch (n) { + case PGM_OPERATION: + case PGM_PRIVILEGED: + sig = TARGET_SIGILL; + n = TARGET_ILL_ILLOPC; + goto do_signal_pc; + case PGM_PROTECTION: + case PGM_ADDRESSING: + sig = TARGET_SIGSEGV; + /* XXX: check env->error_code */ + n = TARGET_SEGV_MAPERR; + addr = env->__excp_addr; + goto do_signal; + case PGM_EXECUTE: + case PGM_SPECIFICATION: + case PGM_SPECIAL_OP: + case PGM_OPERAND: + do_sigill_opn: + sig = TARGET_SIGILL; + n = TARGET_ILL_ILLOPN; + goto do_signal_pc; + + case PGM_FIXPT_OVERFLOW: + sig = TARGET_SIGFPE; + n = TARGET_FPE_INTOVF; + goto do_signal_pc; + case PGM_FIXPT_DIVIDE: + sig = TARGET_SIGFPE; + n = TARGET_FPE_INTDIV; + goto do_signal_pc; + + case PGM_DATA: + n = (env->fpc >> 8) & 0xff; + if (n == 0xff) { + /* compare-and-trap */ + goto do_sigill_opn; + } else { + /* An IEEE exception, simulated or otherwise. */ + if (n & 0x80) { + n = TARGET_FPE_FLTINV; + } else if (n & 0x40) { + n = TARGET_FPE_FLTDIV; + } else if (n & 0x20) { + n = TARGET_FPE_FLTOVF; + } else if (n & 0x10) { + n = TARGET_FPE_FLTUND; + } else if (n & 0x08) { + n = TARGET_FPE_FLTRES; + } else { + /* ??? Quantum exception; BFP, DFP error. 
*/ + goto do_sigill_opn; + } + sig = TARGET_SIGFPE; + goto do_signal_pc; + } + + default: + fprintf(stderr, "Unhandled program exception: %#x\n", n); + cpu_dump_state(cs, stderr, fprintf, 0); + exit(1); + } + break; + + do_signal_pc: + addr = env->psw.addr; + do_signal: + info.si_signo = sig; + info.si_errno = 0; + info.si_code = n; + info._sifields._sigfault._addr = addr; + queue_signal(env, info.si_signo, &info); + break; + + default: + fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(cs, stderr, fprintf, 0); + exit(1); + } + process_pending_signals (env); + } +} + +#endif /* TARGET_S390X */ + +THREAD CPUState *thread_cpu; + +void task_settid(TaskState *ts) +{ + if (ts->ts_tid == 0) { + ts->ts_tid = (pid_t)syscall(SYS_gettid); + } +} + +void stop_all_tasks(void) +{ + /* + * We trust that when using NPTL, start_exclusive() + * handles thread stopping correctly. + */ + start_exclusive(); +} + +/* Assumes contents are already zeroed. */ +void init_task_state(TaskState *ts) +{ + int i; + + ts->used = 1; + ts->first_free = ts->sigqueue_table; + for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) { + ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1]; + } + ts->sigqueue_table[i].next = NULL; +} + +CPUArchState *cpu_copy(CPUArchState *env) +{ + CPUState *cpu = ENV_GET_CPU(env); + CPUState *new_cpu = cpu_init(cpu_model); + CPUArchState *new_env = new_cpu->env_ptr; + CPUBreakpoint *bp; + CPUWatchpoint *wp; + + /* Reset non arch specific state */ + cpu_reset(new_cpu); + + memcpy(new_env, env, sizeof(CPUArchState)); + + /* Clone all break/watchpoints. + Note: Once we support ptrace with hw-debug register access, make sure + BP_CPU break/watchpoints are handled correctly on clone. */ + QTAILQ_INIT(&new_cpu->breakpoints); + QTAILQ_INIT(&new_cpu->watchpoints); + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL); + } + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL); + } + + return new_env; +} + +static void handle_arg_help(const char *arg) +{ + usage(); +} + +static void handle_arg_log(const char *arg) +{ + int mask; + + mask = qemu_str_to_log_mask(arg); + if (!mask) { + qemu_print_log_usage(stdout); + exit(1); + } + qemu_set_log(mask); +} + +static void handle_arg_log_filename(const char *arg) +{ + qemu_set_log_filename(arg); +} + +static void handle_arg_set_env(const char *arg) +{ + char *r, *p, *token; + r = p = strdup(arg); + while ((token = strsep(&p, ",")) != NULL) { + if (envlist_setenv(envlist, token) != 0) { + usage(); + } + } + free(r); +} + +static void handle_arg_unset_env(const char *arg) +{ + char *r, *p, *token; + r = p = strdup(arg); + while ((token = strsep(&p, ",")) != NULL) { + if (envlist_unsetenv(envlist, token) != 0) { + usage(); + } + } + free(r); +} + +static void handle_arg_argv0(const char *arg) +{ + argv0 = strdup(arg); +} + +static void handle_arg_stack_size(const char *arg) +{ + char *p; + guest_stack_size = strtoul(arg, &p, 0); + if (guest_stack_size == 0) { + usage(); + } + + if (*p == 'M') { + guest_stack_size *= 1024 * 1024; + } else if (*p == 'k' || *p == 'K') { + guest_stack_size *= 1024; + } +} + +static void handle_arg_ld_prefix(const char *arg) +{ + interp_prefix = strdup(arg); +} + +static void handle_arg_pagesize(const char *arg) +{ + qemu_host_page_size = atoi(arg); + if (qemu_host_page_size == 0 || + (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) { + fprintf(stderr, "page size must be a power of two\n"); + 
exit(1); + } +} + +static void handle_arg_randseed(const char *arg) +{ + unsigned long long seed; + + if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) { + fprintf(stderr, "Invalid seed number: %s\n", arg); + exit(1); + } + srand(seed); +} + +static void handle_arg_gdb(const char *arg) +{ + gdbstub_port = atoi(arg); +} + +static void handle_arg_uname(const char *arg) +{ + qemu_uname_release = strdup(arg); +} + +static void handle_arg_cpu(const char *arg) +{ + cpu_model = strdup(arg); + if (cpu_model == NULL || is_help_option(cpu_model)) { + /* XXX: implement xxx_cpu_list for targets that still miss it */ +#if defined(cpu_list) + cpu_list(stdout, &fprintf); +#endif + exit(1); + } +} + +#if defined(CONFIG_USE_GUEST_BASE) +static void handle_arg_guest_base(const char *arg) +{ + guest_base = strtol(arg, NULL, 0); + have_guest_base = 1; +} + +static void handle_arg_reserved_va(const char *arg) +{ + char *p; + int shift = 0; + reserved_va = strtoul(arg, &p, 0); + switch (*p) { + case 'k': + case 'K': + shift = 10; + break; + case 'M': + shift = 20; + break; + case 'G': + shift = 30; + break; + } + if (shift) { + unsigned long unshifted = reserved_va; + p++; + reserved_va <<= shift; + if (((reserved_va >> shift) != unshifted) +#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS + || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) +#endif + ) { + fprintf(stderr, "Reserved virtual address too big\n"); + exit(1); + } + } + if (*p) { + fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p); + exit(1); + } +} +#endif + +static void handle_arg_singlestep(const char *arg) +{ + singlestep = 1; +} + +static void handle_arg_strace(const char *arg) +{ + do_strace = 1; +} + +static void handle_arg_version(const char *arg) +{ + printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION + ", Copyright (c) 2003-2008 Fabrice Bellard\n"); + exit(0); +} + +struct qemu_argument { + const char *argv; + const char *env; + bool has_arg; + void (*handle_opt)(const char *arg); + const char *example; + const char *help; +}; + +static const struct qemu_argument arg_table[] = { + {"h", "", false, handle_arg_help, + "", "print this help"}, + {"g", "QEMU_GDB", true, handle_arg_gdb, + "port", "wait gdb connection to 'port'"}, + {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix, + "path", "set the elf interpreter prefix to 'path'"}, + {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size, + "size", "set the stack size to 'size' bytes"}, + {"cpu", "QEMU_CPU", true, handle_arg_cpu, + "model", "select CPU (-cpu help for list)"}, + {"E", "QEMU_SET_ENV", true, handle_arg_set_env, + "var=value", "sets targets environment variable (see below)"}, + {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env, + "var", "unsets targets environment variable (see below)"}, + {"0", "QEMU_ARGV0", true, handle_arg_argv0, + "argv0", "forces target process argv[0] to be 'argv0'"}, + {"r", "QEMU_UNAME", true, handle_arg_uname, + "uname", "set qemu uname release string to 'uname'"}, +#if defined(CONFIG_USE_GUEST_BASE) + {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base, + "address", "set guest_base address to 'address'"}, + {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va, + "size", "reserve 'size' bytes for guest virtual address space"}, +#endif + {"d", "QEMU_LOG", true, handle_arg_log, + "item[,...]", "enable logging of specified items " + "(use '-d help' for a list of items)"}, + {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename, + "logfile", "write logs to 'logfile' (default stderr)"}, + {"p", "QEMU_PAGESIZE", 
true, handle_arg_pagesize, + "pagesize", "set the host page size to 'pagesize'"}, + {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep, + "", "run in singlestep mode"}, + {"strace", "QEMU_STRACE", false, handle_arg_strace, + "", "log system calls"}, + {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed, + "", "Seed for pseudo-random number generator"}, + {"version", "QEMU_VERSION", false, handle_arg_version, + "", "display version information and exit"}, + {NULL, NULL, false, NULL, NULL, NULL} +}; + +static void usage(void) +{ + const struct qemu_argument *arginfo; + int maxarglen; + int maxenvlen; + + printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n" + "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n" + "\n" + "Options and associated environment variables:\n" + "\n"); + + /* Calculate column widths. We must always have at least enough space + * for the column header. + */ + maxarglen = strlen("Argument"); + maxenvlen = strlen("Env-variable"); + + for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { + int arglen = strlen(arginfo->argv); + if (arginfo->has_arg) { + arglen += strlen(arginfo->example) + 1; + } + if (strlen(arginfo->env) > maxenvlen) { + maxenvlen = strlen(arginfo->env); + } + if (arglen > maxarglen) { + maxarglen = arglen; + } + } + + printf("%-*s %-*s Description\n", maxarglen+1, "Argument", + maxenvlen, "Env-variable"); + + for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { + if (arginfo->has_arg) { + printf("-%s %-*s %-*s %s\n", arginfo->argv, + (int)(maxarglen - strlen(arginfo->argv) - 1), + arginfo->example, maxenvlen, arginfo->env, arginfo->help); + } else { + printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv, + maxenvlen, arginfo->env, + arginfo->help); + } + } + + printf("\n" + "Defaults:\n" + "QEMU_LD_PREFIX = %s\n" + "QEMU_STACK_SIZE = %ld byte\n", + interp_prefix, + guest_stack_size); + + printf("\n" + "You can use -E and -U options or the QEMU_SET_ENV and\n" + "QEMU_UNSET_ENV environment variables to set and unset\n" + "environment variables for the target process.\n" + "It is possible to provide several variables by separating them\n" + "by commas in getsubopt(3) style. 
Additionally it is possible to\n" + "provide the -E and -U options multiple times.\n" + "The following lines are equivalent:\n" + " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n" + " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n" + " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n" + "Note that if you provide several changes to a single variable\n" + "the last change will stay in effect.\n"); + + exit(1); +} + +static int parse_args(int argc, char **argv) +{ + const char *r; + int optind; + const struct qemu_argument *arginfo; + + for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { + if (arginfo->env == NULL) { + continue; + } + + r = getenv(arginfo->env); + if (r != NULL) { + arginfo->handle_opt(r); + } + } + + optind = 1; + for (;;) { + if (optind >= argc) { + break; + } + r = argv[optind]; + if (r[0] != '-') { + break; + } + optind++; + r++; + if (!strcmp(r, "-")) { + break; + } + + for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) { + if (!strcmp(r, arginfo->argv)) { + if (arginfo->has_arg) { + if (optind >= argc) { + usage(); + } + arginfo->handle_opt(argv[optind]); + optind++; + } else { + arginfo->handle_opt(NULL); + } + break; + } + } + + /* no option matched the current argv */ + if (arginfo->handle_opt == NULL) { + usage(); + } + } + + if (optind >= argc) { + usage(); + } + + filename = argv[optind]; + exec_path = argv[optind]; + + return optind; +} + +int main(int argc, char **argv, char **envp) +{ + struct target_pt_regs regs1, *regs = ®s1; + struct image_info info1, *info = &info1; + struct linux_binprm bprm; + TaskState *ts; + CPUArchState *env; + CPUState *cpu; + int optind; + char **target_environ, **wrk; + char **target_argv; + int target_argc; + int i; + int ret; + int execfd; + + module_call_init(MODULE_INIT_QOM); + + if ((envlist = envlist_create()) == NULL) { + (void) fprintf(stderr, "Unable to allocate envlist\n"); + exit(1); + } + + /* add current environment into the list */ + for (wrk = environ; *wrk != NULL; wrk++) { + (void) envlist_setenv(envlist, *wrk); + } + + /* Read the stack limit from the kernel. If it's "unlimited", + then we can do little else besides use the default. */ + { + struct rlimit lim; + if (getrlimit(RLIMIT_STACK, &lim) == 0 + && lim.rlim_cur != RLIM_INFINITY + && lim.rlim_cur == (target_long)lim.rlim_cur) { + guest_stack_size = lim.rlim_cur; + } + } + + cpu_model = NULL; +#if defined(cpudef_setup) + cpudef_setup(); /* parse cpu definitions in target config file (TBD) */ +#endif + + srand(time(NULL)); + + optind = parse_args(argc, argv); + + /* Zero out regs */ + memset(regs, 0, sizeof(struct target_pt_regs)); + + /* Zero out image_info */ + memset(info, 0, sizeof(struct image_info)); + + memset(&bprm, 0, sizeof (bprm)); + + /* Scan interp_prefix dir for replacement files. 
*/ + init_paths(interp_prefix); + + init_qemu_uname_release(); + + if (cpu_model == NULL) { +#if defined(TARGET_I386) +#ifdef TARGET_X86_64 + cpu_model = "qemu64"; +#else + cpu_model = "qemu32"; +#endif +#elif defined(TARGET_ARM) + cpu_model = "any"; +#elif defined(TARGET_UNICORE32) + cpu_model = "any"; +#elif defined(TARGET_M68K) + cpu_model = "any"; +#elif defined(TARGET_SPARC) +#ifdef TARGET_SPARC64 + cpu_model = "TI UltraSparc II"; +#else + cpu_model = "Fujitsu MB86904"; +#endif +#elif defined(TARGET_MIPS) +#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) + cpu_model = "5KEf"; +#else + cpu_model = "24Kf"; +#endif +#elif defined TARGET_OPENRISC + cpu_model = "or1200"; +#elif defined(TARGET_PPC) +# ifdef TARGET_PPC64 + cpu_model = "POWER7"; +# else + cpu_model = "750"; +# endif +#elif defined TARGET_SH4 + cpu_model = TYPE_SH7785_CPU; +#else + cpu_model = "any"; +#endif + } + tcg_exec_init(0); + /* NOTE: we need to init the CPU at this stage to get + qemu_host_page_size */ + cpu = cpu_init(cpu_model); + if (!cpu) { + fprintf(stderr, "Unable to find CPU definition\n"); + exit(1); + } + env = cpu->env_ptr; + cpu_reset(cpu); + + thread_cpu = cpu; + + if (getenv("QEMU_STRACE")) { + do_strace = 1; + } + + if (getenv("QEMU_RAND_SEED")) { + handle_arg_randseed(getenv("QEMU_RAND_SEED")); + } + + target_environ = envlist_to_environ(envlist, NULL); + envlist_free(envlist); + +#if defined(CONFIG_USE_GUEST_BASE) + /* + * Now that page sizes are configured in cpu_init() we can do + * proper page alignment for guest_base. + */ + guest_base = HOST_PAGE_ALIGN(guest_base); + + if (reserved_va || have_guest_base) { + guest_base = init_guest_space(guest_base, reserved_va, 0, + have_guest_base); + if (guest_base == (unsigned long)-1) { + fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address " + "space for use as guest address space (check your virtual " + "memory ulimit setting or reserve less using -R option)\n", + reserved_va); + exit(1); + } + + if (reserved_va) { + mmap_next_start = reserved_va; + } + } +#endif /* CONFIG_USE_GUEST_BASE */ + + /* + * Read in mmap_min_addr kernel parameter. This value is used + * When loading the ELF image to determine whether guest_base + * is needed. It is also used in mmap_find_vma. + */ + { + FILE *fp; + + if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) { + unsigned long tmp; + if (fscanf(fp, "%lu", &tmp) == 1) { + mmap_min_addr = tmp; + qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr); + } + fclose(fp); + } + } + + /* + * Prepare copy of argv vector for target. + */ + target_argc = argc - optind; + target_argv = calloc(target_argc + 1, sizeof (char *)); + if (target_argv == NULL) { + (void) fprintf(stderr, "Unable to allocate memory for target_argv\n"); + exit(1); + } + + /* + * If argv0 is specified (using '-0' switch) we replace + * argv[0] pointer with the given one. 
+ */ + i = 0; + if (argv0 != NULL) { + target_argv[i++] = strdup(argv0); + } + for (; i < target_argc; i++) { + target_argv[i] = strdup(argv[optind + i]); + } + target_argv[target_argc] = NULL; + + ts = g_malloc0 (sizeof(TaskState)); + init_task_state(ts); + /* build Task State */ + ts->info = info; + ts->bprm = &bprm; + cpu->opaque = ts; + task_settid(ts); + + execfd = qemu_getauxval(AT_EXECFD); + if (execfd == 0) { + execfd = open(filename, O_RDONLY); + if (execfd < 0) { + printf("Error while loading %s: %s\n", filename, strerror(errno)); + _exit(1); + } + } + + ret = loader_exec(execfd, filename, target_argv, target_environ, regs, + info, &bprm); + if (ret != 0) { + printf("Error while loading %s: %s\n", filename, strerror(-ret)); + _exit(1); + } + + for (wrk = target_environ; *wrk; wrk++) { + free(*wrk); + } + + free(target_environ); + + if (qemu_log_enabled()) { +#if defined(CONFIG_USE_GUEST_BASE) + qemu_log("guest_base 0x%lx\n", guest_base); +#endif + log_page_dump(); + + qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk); + qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code); + qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", + info->start_code); + qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", + info->start_data); + qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data); + qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", + info->start_stack); + qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk); + qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry); + } + + target_set_brk(info->brk); + syscall_init(); + signal_init(); + +#if defined(CONFIG_USE_GUEST_BASE) + /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay + generating the prologue until now so that the prologue can take + the real value of GUEST_BASE into account. 
*/ + tcg_prologue_init(&tcg_ctx); +#endif + +#if defined(TARGET_I386) + env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; + env->hflags |= HF_PE_MASK | HF_CPL_MASK; + if (env->features[FEAT_1_EDX] & CPUID_SSE) { + env->cr[4] |= CR4_OSFXSR_MASK; + env->hflags |= HF_OSFXSR_MASK; + } +#ifndef TARGET_ABI32 + /* enable 64 bit mode if possible */ + if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) { + fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n"); + exit(1); + } + env->cr[4] |= CR4_PAE_MASK; + env->efer |= MSR_EFER_LMA | MSR_EFER_LME; + env->hflags |= HF_LMA_MASK; +#endif + + /* flags setup : we activate the IRQs by default as in user mode */ + env->eflags |= IF_MASK; + + /* linux register setup */ +#ifndef TARGET_ABI32 + env->regs[R_EAX] = regs->rax; + env->regs[R_EBX] = regs->rbx; + env->regs[R_ECX] = regs->rcx; + env->regs[R_EDX] = regs->rdx; + env->regs[R_ESI] = regs->rsi; + env->regs[R_EDI] = regs->rdi; + env->regs[R_EBP] = regs->rbp; + env->regs[R_ESP] = regs->rsp; + env->eip = regs->rip; +#else + env->regs[R_EAX] = regs->eax; + env->regs[R_EBX] = regs->ebx; + env->regs[R_ECX] = regs->ecx; + env->regs[R_EDX] = regs->edx; + env->regs[R_ESI] = regs->esi; + env->regs[R_EDI] = regs->edi; + env->regs[R_EBP] = regs->ebp; + env->regs[R_ESP] = regs->esp; + env->eip = regs->eip; +#endif + + /* linux interrupt setup */ +#ifndef TARGET_ABI32 + env->idt.limit = 511; +#else + env->idt.limit = 255; +#endif + env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1), + PROT_READ|PROT_WRITE, + MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); + idt_table = g2h(env->idt.base); + set_idt(0, 0); + set_idt(1, 0); + set_idt(2, 0); + set_idt(3, 3); + set_idt(4, 3); + set_idt(5, 0); + set_idt(6, 0); + set_idt(7, 0); + set_idt(8, 0); + set_idt(9, 0); + set_idt(10, 0); + set_idt(11, 0); + set_idt(12, 0); + set_idt(13, 0); + set_idt(14, 0); + set_idt(15, 0); + set_idt(16, 0); + set_idt(17, 0); + set_idt(18, 0); + set_idt(19, 0); + set_idt(0x80, 3); + + /* linux segment setup */ + { + uint64_t *gdt_table; + env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES, + PROT_READ|PROT_WRITE, + MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); + env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1; + gdt_table = g2h(env->gdt.base); +#ifdef TARGET_ABI32 + write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | + (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); +#else + /* 64 bit code segment */ + write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | + DESC_L_MASK | + (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT)); +#endif + write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | + (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT)); + } + cpu_x86_load_seg(env, R_CS, __USER_CS); + cpu_x86_load_seg(env, R_SS, __USER_DS); +#ifdef TARGET_ABI32 + cpu_x86_load_seg(env, R_DS, __USER_DS); + cpu_x86_load_seg(env, R_ES, __USER_DS); + cpu_x86_load_seg(env, R_FS, __USER_DS); + cpu_x86_load_seg(env, R_GS, __USER_DS); + /* This hack makes Wine work... 
*/ + env->segs[R_FS].selector = 0; +#else + cpu_x86_load_seg(env, R_DS, 0); + cpu_x86_load_seg(env, R_ES, 0); + cpu_x86_load_seg(env, R_FS, 0); + cpu_x86_load_seg(env, R_GS, 0); +#endif +#elif defined(TARGET_AARCH64) + { + int i; + + if (!(arm_feature(env, ARM_FEATURE_AARCH64))) { + fprintf(stderr, + "The selected ARM CPU does not support 64 bit mode\n"); + exit(1); + } + + for (i = 0; i < 31; i++) { + env->xregs[i] = regs->regs[i]; + } + env->pc = regs->pc; + env->xregs[31] = regs->sp; + } +#elif defined(TARGET_ARM) + { + int i; + cpsr_write(env, regs->uregs[16], 0xffffffff); + for(i = 0; i < 16; i++) { + env->regs[i] = regs->uregs[i]; + } + /* Enable BE8. */ + if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4 + && (info->elf_flags & EF_ARM_BE8)) { + env->bswap_code = 1; + } + } +#elif defined(TARGET_UNICORE32) + { + int i; + cpu_asr_write(env, regs->uregs[32], 0xffffffff); + for (i = 0; i < 32; i++) { + env->regs[i] = regs->uregs[i]; + } + } +#elif defined(TARGET_SPARC) + { + int i; + env->pc = regs->pc; + env->npc = regs->npc; + env->y = regs->y; + for(i = 0; i < 8; i++) + env->gregs[i] = regs->u_regs[i]; + for(i = 0; i < 8; i++) + env->regwptr[i] = regs->u_regs[i + 8]; + } +#elif defined(TARGET_PPC) + { + int i; + +#if defined(TARGET_PPC64) +#if defined(TARGET_ABI32) + env->msr &= ~((target_ulong)1 << MSR_SF); +#else + env->msr |= (target_ulong)1 << MSR_SF; +#endif +#endif + env->nip = regs->nip; + for(i = 0; i < 32; i++) { + env->gpr[i] = regs->gpr[i]; + } + } +#elif defined(TARGET_M68K) + { + env->pc = regs->pc; + env->dregs[0] = regs->d0; + env->dregs[1] = regs->d1; + env->dregs[2] = regs->d2; + env->dregs[3] = regs->d3; + env->dregs[4] = regs->d4; + env->dregs[5] = regs->d5; + env->dregs[6] = regs->d6; + env->dregs[7] = regs->d7; + env->aregs[0] = regs->a0; + env->aregs[1] = regs->a1; + env->aregs[2] = regs->a2; + env->aregs[3] = regs->a3; + env->aregs[4] = regs->a4; + env->aregs[5] = regs->a5; + env->aregs[6] = regs->a6; + env->aregs[7] = regs->usp; + env->sr = regs->sr; + ts->sim_syscalls = 1; + } +#elif defined(TARGET_MICROBLAZE) + { + env->regs[0] = regs->r0; + env->regs[1] = regs->r1; + env->regs[2] = regs->r2; + env->regs[3] = regs->r3; + env->regs[4] = regs->r4; + env->regs[5] = regs->r5; + env->regs[6] = regs->r6; + env->regs[7] = regs->r7; + env->regs[8] = regs->r8; + env->regs[9] = regs->r9; + env->regs[10] = regs->r10; + env->regs[11] = regs->r11; + env->regs[12] = regs->r12; + env->regs[13] = regs->r13; + env->regs[14] = regs->r14; + env->regs[15] = regs->r15; + env->regs[16] = regs->r16; + env->regs[17] = regs->r17; + env->regs[18] = regs->r18; + env->regs[19] = regs->r19; + env->regs[20] = regs->r20; + env->regs[21] = regs->r21; + env->regs[22] = regs->r22; + env->regs[23] = regs->r23; + env->regs[24] = regs->r24; + env->regs[25] = regs->r25; + env->regs[26] = regs->r26; + env->regs[27] = regs->r27; + env->regs[28] = regs->r28; + env->regs[29] = regs->r29; + env->regs[30] = regs->r30; + env->regs[31] = regs->r31; + env->sregs[SR_PC] = regs->pc; + } +#elif defined(TARGET_MIPS) + { + int i; + + for(i = 0; i < 32; i++) { + env->active_tc.gpr[i] = regs->regs[i]; + } + env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1; + if (regs->cp0_epc & 1) { + env->hflags |= MIPS_HFLAG_M16; + } + } +#elif defined(TARGET_OPENRISC) + { + int i; + + for (i = 0; i < 32; i++) { + env->gpr[i] = regs->gpr[i]; + } + + env->sr = regs->sr; + env->pc = regs->pc; + } +#elif defined(TARGET_SH4) + { + int i; + + for(i = 0; i < 16; i++) { + env->gregs[i] = regs->regs[i]; + } + env->pc = 
regs->pc;
+    }
+#elif defined(TARGET_ALPHA)
+    {
+        int i;
+
+        for(i = 0; i < 28; i++) {
+            env->ir[i] = ((abi_ulong *)regs)[i];
+        }
+        env->ir[IR_SP] = regs->usp;
+        env->pc = regs->pc;
+    }
+#elif defined(TARGET_CRIS)
+    {
+        env->regs[0] = regs->r0;
+        env->regs[1] = regs->r1;
+        env->regs[2] = regs->r2;
+        env->regs[3] = regs->r3;
+        env->regs[4] = regs->r4;
+        env->regs[5] = regs->r5;
+        env->regs[6] = regs->r6;
+        env->regs[7] = regs->r7;
+        env->regs[8] = regs->r8;
+        env->regs[9] = regs->r9;
+        env->regs[10] = regs->r10;
+        env->regs[11] = regs->r11;
+        env->regs[12] = regs->r12;
+        env->regs[13] = regs->r13;
+        env->regs[14] = info->start_stack;
+        env->regs[15] = regs->acr;
+        env->pc = regs->erp;
+    }
+#elif defined(TARGET_S390X)
+    {
+        int i;
+        for (i = 0; i < 16; i++) {
+            env->regs[i] = regs->gprs[i];
+        }
+        env->psw.mask = regs->psw.mask;
+        env->psw.addr = regs->psw.addr;
+    }
+#else
+#error unsupported target CPU
+#endif
+
+#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
+    ts->stack_base = info->start_stack;
+    ts->heap_base = info->brk;
+    /* This will be filled in on the first SYS_HEAPINFO call. */
+    ts->heap_limit = 0;
+#endif
+
+    if (gdbstub_port) {
+        if (gdbserver_start(gdbstub_port) < 0) {
+            fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
+                    gdbstub_port);
+            exit(1);
+        }
+        gdb_handlesig(cpu, 0);
+    }
+    cpu_loop(env);
+    /* never exits */
+    return 0;
+}
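
For reference, the o32 syscall return path in the MIPS cpu_loop above can be summarised in isolation: a do_syscall() result in the range [-1133, -1] is treated as -errno, so the error flag in $a3 (gpr[7]) is set and $v0 (gpr[2]) receives the positive errno; any other value is stored in $v0 verbatim with $a3 cleared. A minimal standalone sketch follows (illustration only, not part of main.c; the abi_long/abi_ulong typedefs are local stand-ins for the target ABI types):

#include <stdio.h>

typedef long abi_long;
typedef unsigned long abi_ulong;

/* Mirror of the o32 return-value handling done after do_syscall(). */
static void mips_o32_set_return(abi_long ret, abi_ulong *v0, abi_ulong *a3)
{
    if ((abi_ulong)ret >= (abi_ulong)-1133) {
        *a3 = 1;        /* error flag ($a3 / gpr[7]) */
        *v0 = -ret;     /* positive errno in $v0 / gpr[2] */
    } else {
        *a3 = 0;        /* success */
        *v0 = ret;      /* plain result */
    }
}

int main(void)
{
    abi_ulong v0, a3;

    mips_o32_set_return(-9, &v0, &a3);   /* e.g. -EBADF from do_syscall() */
    printf("v0=%lu a3=%lu\n", v0, a3);   /* prints v0=9 a3=1 */

    mips_o32_set_return(3, &v0, &a3);    /* e.g. a newly opened fd */
    printf("v0=%lu a3=%lu\n", v0, a3);   /* prints v0=3 a3=0 */
    return 0;
}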