From e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb Mon Sep 17 00:00:00 2001
From: Yang Zhang <yang.z.zhang@intel.com>
Date: Fri, 28 Aug 2015 09:58:54 +0800
Subject: Add qemu 2.4.0

Change-Id: Ic99cbad4b61f8b127b7dc74d04576c0bcbaaf4f5
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
---
 qemu/target-s390x/Makefile.objs   |    5 +
 qemu/target-s390x/arch_dump.c     |  254 ++
 qemu/target-s390x/cc_helper.c     |  568 ++++
 qemu/target-s390x/cpu-qom.h       |  103 +
 qemu/target-s390x/cpu.c           |  374 +++
 qemu/target-s390x/cpu.h           | 1281 +++++++++
 qemu/target-s390x/fpu_helper.c    |  743 +++++
 qemu/target-s390x/gdbstub.c       |  190 ++
 qemu/target-s390x/helper.c        |  652 +++++
 qemu/target-s390x/helper.h        |  133 +
 qemu/target-s390x/insn-data.def   |  931 +++++++
 qemu/target-s390x/insn-format.def |   55 +
 qemu/target-s390x/int_helper.c    |  154 ++
 qemu/target-s390x/interrupt.c     |   66 +
 qemu/target-s390x/ioinst.c        |  831 ++++++
 qemu/target-s390x/ioinst.h        |  246 ++
 qemu/target-s390x/kvm.c           | 2245 +++++++++++++++
 qemu/target-s390x/machine.c       |  170 ++
 qemu/target-s390x/mem_helper.c    | 1163 ++++++++
 qemu/target-s390x/misc_helper.c   |  642 +++++
 qemu/target-s390x/mmu_helper.c    |  480 ++++
 qemu/target-s390x/translate.c     | 5473 +++++++++++++++++++++++++++++++++++++
 22 files changed, 16759 insertions(+)
 create mode 100644 qemu/target-s390x/Makefile.objs
 create mode 100644 qemu/target-s390x/arch_dump.c
 create mode 100644 qemu/target-s390x/cc_helper.c
 create mode 100644 qemu/target-s390x/cpu-qom.h
 create mode 100644 qemu/target-s390x/cpu.c
 create mode 100644 qemu/target-s390x/cpu.h
 create mode 100644 qemu/target-s390x/fpu_helper.c
 create mode 100644 qemu/target-s390x/gdbstub.c
 create mode 100644 qemu/target-s390x/helper.c
 create mode 100644 qemu/target-s390x/helper.h
 create mode 100644 qemu/target-s390x/insn-data.def
 create mode 100644 qemu/target-s390x/insn-format.def
 create mode 100644 qemu/target-s390x/int_helper.c
 create mode 100644 qemu/target-s390x/interrupt.c
 create mode 100644 qemu/target-s390x/ioinst.c
 create mode 100644 qemu/target-s390x/ioinst.h
 create mode 100644 qemu/target-s390x/kvm.c
 create mode 100644 qemu/target-s390x/machine.c
 create mode 100644 qemu/target-s390x/mem_helper.c
 create mode 100644 qemu/target-s390x/misc_helper.c
 create mode 100644 qemu/target-s390x/mmu_helper.c
 create mode 100644 qemu/target-s390x/translate.c

diff --git a/qemu/target-s390x/Makefile.objs b/qemu/target-s390x/Makefile.objs
new file mode 100644
index 000000000..dd62cbda8
--- /dev/null
+++ b/qemu/target-s390x/Makefile.objs
@@ -0,0 +1,5 @@
+obj-y += translate.o helper.o cpu.o interrupt.o
+obj-y += int_helper.o fpu_helper.o cc_helper.o mem_helper.o misc_helper.o
+obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += machine.o ioinst.o arch_dump.o mmu_helper.o
+obj-$(CONFIG_KVM) += kvm.o
diff --git a/qemu/target-s390x/arch_dump.c b/qemu/target-s390x/arch_dump.c
new file mode 100644
index 000000000..dab63eb44
--- /dev/null
+++ b/qemu/target-s390x/arch_dump.c
@@ -0,0 +1,254 @@
+/*
+ * writing ELF notes for s390x arch
+ *
+ *
+ * Copyright IBM Corp. 2012, 2013
+ *
+ *     Ekaterina Tumanova <tumanova@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "cpu.h"
+#include "elf.h"
+#include "exec/cpu-all.h"
+#include "sysemu/dump.h"
+#include "sysemu/kvm.h"
+
+
+struct S390xUserRegsStruct {
+    uint64_t psw[2];
+    uint64_t gprs[16];
+    uint32_t acrs[16];
+} QEMU_PACKED;
+
+typedef struct S390xUserRegsStruct S390xUserRegs;
+
+struct S390xElfPrstatusStruct {
+    uint8_t pad1[32];
+    uint32_t pid;
+    uint8_t pad2[76];
+    S390xUserRegs regs;
+    uint8_t pad3[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfPrstatusStruct S390xElfPrstatus;
+
+struct S390xElfFpregsetStruct {
+    uint32_t fpc;
+    uint32_t pad;
+    uint64_t fprs[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfFpregsetStruct S390xElfFpregset;
+
+struct S390xElfVregsLoStruct {
+    uint64_t vregs[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfVregsLoStruct S390xElfVregsLo;
+
+struct S390xElfVregsHiStruct {
+    uint64_t vregs[16][2];
+} QEMU_PACKED;
+
+typedef struct S390xElfVregsHiStruct S390xElfVregsHi;
+
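+/*
+ * Layout of a single ELF note: the Elf64_Nhdr, the name field ("CORE",
+ * five bytes including the terminating NUL, padded to an 8-byte boundary
+ * by pad3), and a payload drawn from the union below.  Only the first
+ * contents_size bytes of the union are written for each note type.
+ */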
+typedef struct noteStruct {
+    Elf64_Nhdr hdr;
+    char name[5];
+    char pad3[3];
+    union {
+        S390xElfPrstatus prstatus;
+        S390xElfFpregset fpregset;
+        S390xElfVregsLo vregslo;
+        S390xElfVregsHi vregshi;
+        uint32_t prefix;
+        uint64_t timer;
+        uint64_t todcmp;
+        uint32_t todpreg;
+        uint64_t ctrs[16];
+    } contents;
+} QEMU_PACKED Note;
+
+static void s390x_write_elf64_prstatus(Note *note, S390CPU *cpu)
+{
+    int i;
+    S390xUserRegs *regs;
+
+    note->hdr.n_type = cpu_to_be32(NT_PRSTATUS);
+
+    regs = &(note->contents.prstatus.regs);
+    regs->psw[0] = cpu_to_be64(cpu->env.psw.mask);
+    regs->psw[1] = cpu_to_be64(cpu->env.psw.addr);
+    for (i = 0; i <= 15; i++) {
+        regs->acrs[i] = cpu_to_be32(cpu->env.aregs[i]);
+        regs->gprs[i] = cpu_to_be64(cpu->env.regs[i]);
+    }
+}
+
+static void s390x_write_elf64_fpregset(Note *note, S390CPU *cpu)
+{
+    int i;
+    CPUS390XState *cs = &cpu->env;
+
+    note->hdr.n_type = cpu_to_be32(NT_FPREGSET);
+    note->contents.fpregset.fpc = cpu_to_be32(cpu->env.fpc);
+    for (i = 0; i <= 15; i++) {
+        note->contents.fpregset.fprs[i] = cpu_to_be64(get_freg(cs, i)->ll);
+    }
+}
+
+static void s390x_write_elf64_vregslo(Note *note, S390CPU *cpu)
+{
+    int i;
+
+    note->hdr.n_type = cpu_to_be32(NT_S390_VXRS_LOW);
+    for (i = 0; i <= 15; i++) {
+        note->contents.vregslo.vregs[i] = cpu_to_be64(cpu->env.vregs[i][1].ll);
+    }
+}
+
+static void s390x_write_elf64_vregshi(Note *note, S390CPU *cpu)
+{
+    int i;
+    S390xElfVregsHi *temp_vregshi;
+
+    temp_vregshi = &note->contents.vregshi;
+
+    note->hdr.n_type = cpu_to_be32(NT_S390_VXRS_HIGH);
+    for (i = 0; i <= 15; i++) {
+        temp_vregshi->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i + 16][0].ll);
+        temp_vregshi->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i + 16][1].ll);
+    }
+}
+
+static void s390x_write_elf64_timer(Note *note, S390CPU *cpu)
+{
+    note->hdr.n_type = cpu_to_be32(NT_S390_TIMER);
+    note->contents.timer = cpu_to_be64((uint64_t)(cpu->env.cputm));
+}
+
+static void s390x_write_elf64_todcmp(Note *note, S390CPU *cpu)
+{
+    note->hdr.n_type = cpu_to_be32(NT_S390_TODCMP);
+    note->contents.todcmp = cpu_to_be64((uint64_t)(cpu->env.ckc));
+}
+
+static void s390x_write_elf64_todpreg(Note *note, S390CPU *cpu)
+{
+    note->hdr.n_type = cpu_to_be32(NT_S390_TODPREG);
+    note->contents.todpreg = cpu_to_be32((uint32_t)(cpu->env.todpr));
+}
+
+static void s390x_write_elf64_ctrs(Note *note, S390CPU *cpu)
+{
+    int i;
+
+    note->hdr.n_type = cpu_to_be32(NT_S390_CTRS);
+
+    for (i = 0; i <= 15; i++) {
+        note->contents.ctrs[i] = cpu_to_be64(cpu->env.cregs[i]);
+    }
+}
+
+static void s390x_write_elf64_prefix(Note *note, S390CPU *cpu)
+{
+    note->hdr.n_type = cpu_to_be32(NT_S390_PREFIX);
+    note->contents.prefix = cpu_to_be32((uint32_t)(cpu->env.psa));
+}
+
+
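+/* Table of note types to emit: each entry pairs the payload size with
+ * the function that fills it in; a NULL sentinel terminates the list. */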
+static const struct NoteFuncDescStruct {
+    int contents_size;
+    void (*note_contents_func)(Note *note, S390CPU *cpu);
+} note_func[] = {
+    {sizeof(((Note *)0)->contents.prstatus), s390x_write_elf64_prstatus},
+    {sizeof(((Note *)0)->contents.prefix),   s390x_write_elf64_prefix},
+    {sizeof(((Note *)0)->contents.fpregset), s390x_write_elf64_fpregset},
+    {sizeof(((Note *)0)->contents.ctrs),     s390x_write_elf64_ctrs},
+    {sizeof(((Note *)0)->contents.timer),    s390x_write_elf64_timer},
+    {sizeof(((Note *)0)->contents.todcmp),   s390x_write_elf64_todcmp},
+    {sizeof(((Note *)0)->contents.todpreg),  s390x_write_elf64_todpreg},
+    {sizeof(((Note *)0)->contents.vregslo),  s390x_write_elf64_vregslo},
+    {sizeof(((Note *)0)->contents.vregshi),  s390x_write_elf64_vregshi},
+    { 0, NULL}
+};
+
+typedef struct NoteFuncDescStruct NoteFuncDesc;
+
+
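+/*
+ * Emit one note per table entry.  sizeof(note) covers the largest union
+ * member, so the size passed to the write callback is trimmed down to
+ * the header, the name, and the actual payload of the current note.
+ */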
+static int s390x_write_all_elf64_notes(const char *note_name,
+                                       WriteCoreDumpFunction f,
+                                       S390CPU *cpu, int id,
+                                       void *opaque)
+{
+    Note note;
+    const NoteFuncDesc *nf;
+    int note_size;
+    int ret = -1;
+
+    for (nf = note_func; nf->note_contents_func; nf++) {
+        memset(&note, 0, sizeof(note));
+        note.hdr.n_namesz = cpu_to_be32(sizeof(note.name));
+        note.hdr.n_descsz = cpu_to_be32(nf->contents_size);
+        strncpy(note.name, note_name, sizeof(note.name));
+        (*nf->note_contents_func)(&note, cpu);
+
+        note_size = sizeof(note) - sizeof(note.contents) + nf->contents_size;
+        ret = f(&note, note_size, opaque);
+
+        if (ret < 0) {
+            return -1;
+        }
+
+    }
+
+    return 0;
+}
+
+
+int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+                              int cpuid, void *opaque)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    return s390x_write_all_elf64_notes("CORE", f, cpu, cpuid, opaque);
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+                      const struct GuestPhysBlockList *guest_phys_blocks)
+{
+    info->d_machine = EM_S390;
+    info->d_endian = ELFDATA2MSB;
+    info->d_class = ELFCLASS64;
+
+    return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+    int name_size = 8; /* "CORE" or "QEMU" rounded up to 8 bytes */
+    size_t elf_note_size = 0;
+    int note_head_size;
+    const NoteFuncDesc *nf;
+
+    assert(class == ELFCLASS64);
+    assert(machine == EM_S390);
+
+    note_head_size = sizeof(Elf64_Nhdr);
+
+    for (nf = note_func; nf->note_contents_func; nf++) {
+        elf_note_size = elf_note_size + note_head_size + name_size +
+                        nf->contents_size;
+    }
+
+    return elf_note_size * nr_cpus;
+}
+
+int s390_cpu_write_elf64_qemunote(WriteCoreDumpFunction f,
+                                  CPUState *cpu, void *opaque)
+{
+    return 0;
+}
diff --git a/qemu/target-s390x/cc_helper.c b/qemu/target-s390x/cc_helper.c
new file mode 100644
index 000000000..bfce3f1e6
--- /dev/null
+++ b/qemu/target-s390x/cc_helper.c
@@ -0,0 +1,568 @@
+/*
+ *  S/390 condition code helper routines
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *  Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
+{
+    if (src == dst) {
+        return 0;
+    } else if (src < dst) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+static uint32_t cc_calc_ltgt0_32(int32_t dst)
+{
+    return cc_calc_ltgt_32(dst, 0);
+}
+
+static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
+{
+    if (src == dst) {
+        return 0;
+    } else if (src < dst) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+static uint32_t cc_calc_ltgt0_64(int64_t dst)
+{
+    return cc_calc_ltgt_64(dst, 0);
+}
+
+static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
+{
+    if (src == dst) {
+        return 0;
+    } else if (src < dst) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
+{
+    if (src == dst) {
+        return 0;
+    } else if (src < dst) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
+{
+    uint32_t r = val & mask;
+
+    if (r == 0) {
+        return 0;
+    } else if (r == mask) {
+        return 3;
+    } else {
+        return 1;
+    }
+}
+
+static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
+{
+    uint64_t r = val & mask;
+
+    if (r == 0) {
+        return 0;
+    } else if (r == mask) {
+        return 3;
+    } else {
+        int top = clz64(mask);
+        if ((int64_t)(val << top) < 0) {
+            return 2;
+        } else {
+            return 1;
+        }
+    }
+}
+
+static uint32_t cc_calc_nz(uint64_t dst)
+{
+    return !!dst;
+}
+
+static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
+{
+    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+        return 3; /* overflow */
+    } else {
+        if (ar < 0) {
+            return 1;
+        } else if (ar > 0) {
+            return 2;
+        } else {
+            return 0;
+        }
+    }
+}
+
+static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+    return (ar != 0) + 2 * (ar < a1);
+}
+
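+/*
+ * ar = a1 + a2 + carry_in (mod 2^64).  a2 + carry_in is recovered as
+ * ar - a1; a carry-out happened iff either partial sum wrapped around.
+ * E.g. a1 = UINT64_MAX, a2 = 0, carry_in = 1: ar = 0, a2c = 1, and
+ * ar < a1 flags the carry-out.
+ */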
+static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+    /* Recover a2 + carry_in.  */
+    uint64_t a2c = ar - a1;
+    /* Check for a2+carry_in overflow, then a1+a2c overflow.  */
+    int carry_out = (a2c < a2) || (ar < a1);
+
+    return (ar != 0) + 2 * carry_out;
+}
+
+static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
+{
+    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+        return 3; /* overflow */
+    } else {
+        if (ar < 0) {
+            return 1;
+        } else if (ar > 0) {
+            return 2;
+        } else {
+            return 0;
+        }
+    }
+}
+
+static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+    if (ar == 0) {
+        return 2;
+    } else {
+        if (a2 > a1) {
+            return 1;
+        } else {
+            return 3;
+        }
+    }
+}
+
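+/*
+ * ar = a1 - a2 - borrow_in.  If ar differs from a1 - a2, a borrow-in of
+ * one was applied, which turns the borrow-out condition from a2 > a1
+ * into a2 >= a1.  The CC encodes "result non-zero" in bit 0 and
+ * "no borrow" in bit 1.
+ */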
+static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+    int borrow_out;
+
+    if (ar != a1 - a2) {    /* the difference implies a borrow-in */
+        borrow_out = (a2 >= a1);
+    } else {
+        borrow_out = (a2 > a1);
+    }
+
+    return (ar != 0) + 2 * !borrow_out;
+}
+
+static uint32_t cc_calc_abs_64(int64_t dst)
+{
+    if ((uint64_t)dst == 0x8000000000000000ULL) {
+        return 3;
+    } else if (dst) {
+        return 2;
+    } else {
+        return 0;
+    }
+}
+
+static uint32_t cc_calc_nabs_64(int64_t dst)
+{
+    return !!dst;
+}
+
+static uint32_t cc_calc_comp_64(int64_t dst)
+{
+    if ((uint64_t)dst == 0x8000000000000000ULL) {
+        return 3;
+    } else if (dst < 0) {
+        return 1;
+    } else if (dst > 0) {
+        return 2;
+    } else {
+        return 0;
+    }
+}
+
+
+static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
+{
+    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+        return 3; /* overflow */
+    } else {
+        if (ar < 0) {
+            return 1;
+        } else if (ar > 0) {
+            return 2;
+        } else {
+            return 0;
+        }
+    }
+}
+
+static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+    return (ar != 0) + 2 * (ar < a1);
+}
+
+static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+    /* Recover a2 + carry_in.  */
+    uint32_t a2c = ar - a1;
+    /* Check for a2+carry_in overflow, then a1+a2c overflow.  */
+    int carry_out = (a2c < a2) || (ar < a1);
+
+    return (ar != 0) + 2 * carry_out;
+}
+
+static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
+{
+    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+        return 3; /* overflow */
+    } else {
+        if (ar < 0) {
+            return 1;
+        } else if (ar > 0) {
+            return 2;
+        } else {
+            return 0;
+        }
+    }
+}
+
+static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+    if (ar == 0) {
+        return 2;
+    } else {
+        if (a2 > a1) {
+            return 1;
+        } else {
+            return 3;
+        }
+    }
+}
+
+static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+    int borrow_out;
+
+    if (ar != a1 - a2) {    /* the difference implies a borrow-in */
+        borrow_out = (a2 >= a1);
+    } else {
+        borrow_out = (a2 > a1);
+    }
+
+    return (ar != 0) + 2 * !borrow_out;
+}
+
+static uint32_t cc_calc_abs_32(int32_t dst)
+{
+    if ((uint32_t)dst == 0x80000000UL) {
+        return 3;
+    } else if (dst) {
+        return 2;
+    } else {
+        return 0;
+    }
+}
+
+static uint32_t cc_calc_nabs_32(int32_t dst)
+{
+    return !!dst;
+}
+
+static uint32_t cc_calc_comp_32(int32_t dst)
+{
+    if ((uint32_t)dst == 0x80000000UL) {
+        return 3;
+    } else if (dst < 0) {
+        return 1;
+    } else if (dst > 0) {
+        return 2;
+    } else {
+        return 0;
+    }
+}
+
+/* calculate condition code for insert character under mask insn */
+static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
+{
+    if ((val & mask) == 0) {
+        return 0;
+    } else {
+        int top = clz64(mask);
+        if ((int64_t)(val << top) < 0) {
+            return 1;
+        } else {
+            return 2;
+        }
+    }
+}
+
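+/*
+ * SHIFT LEFT SINGLE: the sign bit is kept while the remaining bits are
+ * shifted.  The mask selects the top 'shift' bits of the source; unless
+ * they all match the (replicated) sign bit, significant bits are lost
+ * and CC 3 (overflow) is returned.
+ */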
+static uint32_t cc_calc_sla_32(uint32_t src, int shift)
+{
+    uint32_t mask = ((1U << shift) - 1U) << (32 - shift);
+    uint32_t sign = 1U << 31;
+    uint32_t match;
+    int32_t r;
+
+    /* Check if the sign bit stays the same.  */
+    if (src & sign) {
+        match = mask;
+    } else {
+        match = 0;
+    }
+    if ((src & mask) != match) {
+        /* Overflow.  */
+        return 3;
+    }
+
+    r = ((src << shift) & ~sign) | (src & sign);
+    if (r == 0) {
+        return 0;
+    } else if (r < 0) {
+        return 1;
+    }
+    return 2;
+}
+
+static uint32_t cc_calc_sla_64(uint64_t src, int shift)
+{
+    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
+    uint64_t sign = 1ULL << 63;
+    uint64_t match;
+    int64_t r;
+
+    /* Check if the sign bit stays the same.  */
+    if (src & sign) {
+        match = mask;
+    } else {
+        match = 0;
+    }
+    if ((src & mask) != match) {
+        /* Overflow.  */
+        return 3;
+    }
+
+    r = ((src << shift) & ~sign) | (src & sign);
+    if (r == 0) {
+        return 0;
+    } else if (r < 0) {
+        return 1;
+    }
+    return 2;
+}
+
+static uint32_t cc_calc_flogr(uint64_t dst)
+{
+    return dst ? 2 : 0;
+}
+
+static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
+                                  uint64_t src, uint64_t dst, uint64_t vr)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    uint32_t r = 0;
+
+    switch (cc_op) {
+    case CC_OP_CONST0:
+    case CC_OP_CONST1:
+    case CC_OP_CONST2:
+    case CC_OP_CONST3:
+        /* cc_op value _is_ cc */
+        r = cc_op;
+        break;
+    case CC_OP_LTGT0_32:
+        r = cc_calc_ltgt0_32(dst);
+        break;
+    case CC_OP_LTGT0_64:
+        r = cc_calc_ltgt0_64(dst);
+        break;
+    case CC_OP_LTGT_32:
+        r = cc_calc_ltgt_32(src, dst);
+        break;
+    case CC_OP_LTGT_64:
+        r = cc_calc_ltgt_64(src, dst);
+        break;
+    case CC_OP_LTUGTU_32:
+        r = cc_calc_ltugtu_32(src, dst);
+        break;
+    case CC_OP_LTUGTU_64:
+        r = cc_calc_ltugtu_64(src, dst);
+        break;
+    case CC_OP_TM_32:
+        r = cc_calc_tm_32(src, dst);
+        break;
+    case CC_OP_TM_64:
+        r = cc_calc_tm_64(src, dst);
+        break;
+    case CC_OP_NZ:
+        r = cc_calc_nz(dst);
+        break;
+    case CC_OP_ADD_64:
+        r = cc_calc_add_64(src, dst, vr);
+        break;
+    case CC_OP_ADDU_64:
+        r = cc_calc_addu_64(src, dst, vr);
+        break;
+    case CC_OP_ADDC_64:
+        r = cc_calc_addc_64(src, dst, vr);
+        break;
+    case CC_OP_SUB_64:
+        r = cc_calc_sub_64(src, dst, vr);
+        break;
+    case CC_OP_SUBU_64:
+        r = cc_calc_subu_64(src, dst, vr);
+        break;
+    case CC_OP_SUBB_64:
+        r = cc_calc_subb_64(src, dst, vr);
+        break;
+    case CC_OP_ABS_64:
+        r = cc_calc_abs_64(dst);
+        break;
+    case CC_OP_NABS_64:
+        r = cc_calc_nabs_64(dst);
+        break;
+    case CC_OP_COMP_64:
+        r = cc_calc_comp_64(dst);
+        break;
+
+    case CC_OP_ADD_32:
+        r = cc_calc_add_32(src, dst, vr);
+        break;
+    case CC_OP_ADDU_32:
+        r = cc_calc_addu_32(src, dst, vr);
+        break;
+    case CC_OP_ADDC_32:
+        r = cc_calc_addc_32(src, dst, vr);
+        break;
+    case CC_OP_SUB_32:
+        r = cc_calc_sub_32(src, dst, vr);
+        break;
+    case CC_OP_SUBU_32:
+        r = cc_calc_subu_32(src, dst, vr);
+        break;
+    case CC_OP_SUBB_32:
+        r = cc_calc_subb_32(src, dst, vr);
+        break;
+    case CC_OP_ABS_32:
+        r = cc_calc_abs_32(dst);
+        break;
+    case CC_OP_NABS_32:
+        r = cc_calc_nabs_32(dst);
+        break;
+    case CC_OP_COMP_32:
+        r = cc_calc_comp_32(dst);
+        break;
+
+    case CC_OP_ICM:
+        r = cc_calc_icm(src, dst);
+        break;
+    case CC_OP_SLA_32:
+        r = cc_calc_sla_32(src, dst);
+        break;
+    case CC_OP_SLA_64:
+        r = cc_calc_sla_64(src, dst);
+        break;
+    case CC_OP_FLOGR:
+        r = cc_calc_flogr(dst);
+        break;
+
+    case CC_OP_NZ_F32:
+        r = set_cc_nz_f32(dst);
+        break;
+    case CC_OP_NZ_F64:
+        r = set_cc_nz_f64(dst);
+        break;
+    case CC_OP_NZ_F128:
+        r = set_cc_nz_f128(make_float128(src, dst));
+        break;
+
+    default:
+        cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op));
+    }
+
+    HELPER_LOG("%s: %15s 0x%016" PRIx64 " 0x%016" PRIx64 " 0x%016" PRIx64
+               " = %d\n", __func__, cc_name(cc_op), src, dst, vr, r);
+    return r;
+}
+
+uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+                 uint64_t vr)
+{
+    return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
+uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
+                         uint64_t dst, uint64_t vr)
+{
+    return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+    load_psw(env, mask, addr);
+    cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+}
+
+void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
+{
+    HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);
+
+    switch (a1 & 0xf00) {
+    case 0x000:
+        env->psw.mask &= ~PSW_MASK_ASC;
+        env->psw.mask |= PSW_ASC_PRIMARY;
+        break;
+    case 0x100:
+        env->psw.mask &= ~PSW_MASK_ASC;
+        env->psw.mask |= PSW_ASC_SECONDARY;
+        break;
+    case 0x300:
+        env->psw.mask &= ~PSW_MASK_ASC;
+        env->psw.mask |= PSW_ASC_HOME;
+        break;
+    default:
+        qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        break;
+    }
+}
+#endif
diff --git a/qemu/target-s390x/cpu-qom.h b/qemu/target-s390x/cpu-qom.h
new file mode 100644
index 000000000..491c1b876
--- /dev/null
+++ b/qemu/target-s390x/cpu-qom.h
@@ -0,0 +1,103 @@
+/*
+ * QEMU S/390 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_S390_CPU_QOM_H
+#define QEMU_S390_CPU_QOM_H
+
+#include "qom/cpu.h"
+#include "cpu.h"
+
+#define TYPE_S390_CPU "s390-cpu"
+
+#define S390_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(S390CPUClass, (klass), TYPE_S390_CPU)
+#define S390_CPU(obj) \
+    OBJECT_CHECK(S390CPU, (obj), TYPE_S390_CPU)
+#define S390_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(S390CPUClass, (obj), TYPE_S390_CPU)
+
+/**
+ * S390CPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @load_normal: Performs a load normal.
+ * @cpu_reset: Performs a CPU reset.
+ * @initial_cpu_reset: Performs an initial CPU reset.
+ *
+ * An S/390 CPU model.
+ */
+typedef struct S390CPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+
+    DeviceRealize parent_realize;
+    void (*parent_reset)(CPUState *cpu);
+    void (*load_normal)(CPUState *cpu);
+    void (*cpu_reset)(CPUState *cpu);
+    void (*initial_cpu_reset)(CPUState *cpu);
+} S390CPUClass;
+
+/**
+ * S390CPU:
+ * @env: #CPUS390XState.
+ *
+ * An S/390 CPU.
+ */
+typedef struct S390CPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPUS390XState env;
+    /* needed for live migration */
+    void *irqstate;
+    uint32_t irqstate_saved_size;
+} S390CPU;
+
+static inline S390CPU *s390_env_get_cpu(CPUS390XState *env)
+{
+    return container_of(env, S390CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(s390_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(S390CPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_s390_cpu;
+#endif
+
+void s390_cpu_do_interrupt(CPUState *cpu);
+bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void s390_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+                         int flags);
+int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+                              int cpuid, void *opaque);
+
+int s390_cpu_write_elf64_qemunote(WriteCoreDumpFunction f,
+                                  CPUState *cpu, void *opaque);
+hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
+int s390_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void s390_cpu_gdb_init(CPUState *cs);
+void s390x_cpu_debug_excp_handler(CPUState *cs);
+
+#endif
diff --git a/qemu/target-s390x/cpu.c b/qemu/target-s390x/cpu.c
new file mode 100644
index 000000000..c3e21b445
--- /dev/null
+++ b/qemu/target-s390x/cpu.c
@@ -0,0 +1,374 @@
+/*
+ * QEMU S/390 CPU
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2012 IBM Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ * Contributions after 2012-12-11 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "hw/hw.h"
+#include "trace.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/arch_init.h"
+#endif
+
+#define CR0_RESET       0xE0UL
+#define CR14_RESET      0xC2000000UL
+
+/* generate CPU information for cpu -? */
+void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+#ifdef CONFIG_KVM
+    (*cpu_fprintf)(f, "s390 %16s\n", "host");
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
+{
+    CpuDefinitionInfoList *entry;
+    CpuDefinitionInfo *info;
+
+    info = g_malloc0(sizeof(*info));
+    info->name = g_strdup("host");
+
+    entry = g_malloc0(sizeof(*entry));
+    entry->value = info;
+
+    return entry;
+}
+#endif
+
+static void s390_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    S390CPU *cpu = S390_CPU(cs);
+
+    cpu->env.psw.addr = value;
+}
+
+static bool s390_cpu_has_work(CPUState *cs)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+
+    return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+           (env->psw.mask & PSW_MASK_EXT);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* S390CPUClass::load_normal() */
+static void s390_cpu_load_normal(CPUState *s)
+{
+    S390CPU *cpu = S390_CPU(s);
+    cpu->env.psw.addr = ldl_phys(s->as, 4) & PSW_MASK_ESA_ADDR;
+    cpu->env.psw.mask = PSW_MASK_32 | PSW_MASK_64;
+    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+}
+#endif
+
+/* S390CPUClass::cpu_reset() */
+static void s390_cpu_reset(CPUState *s)
+{
+    S390CPU *cpu = S390_CPU(s);
+    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+    CPUS390XState *env = &cpu->env;
+
+    env->pfault_token = -1UL;
+    scc->parent_reset(s);
+    cpu->env.sigp_order = 0;
+    s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+    tlb_flush(s, 1);
+}
+
+/* S390CPUClass::initial_cpu_reset() */
+static void s390_cpu_initial_reset(CPUState *s)
+{
+    S390CPU *cpu = S390_CPU(s);
+    CPUS390XState *env = &cpu->env;
+    int i;
+
+    s390_cpu_reset(s);
+    /* initial reset does not touch regs, fregs and aregs */
+    memset(&env->fpc, 0, offsetof(CPUS390XState, cpu_num) -
+                         offsetof(CPUS390XState, fpc));
+
+    /* architectured initial values for CR 0 and 14 */
+    env->cregs[0] = CR0_RESET;
+    env->cregs[14] = CR14_RESET;
+
+    /* architectured initial value for Breaking-Event-Address register */
+    env->gbea = 1;
+
+    env->pfault_token = -1UL;
+    env->ext_index = -1;
+    for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
+        env->io_index[i] = -1;
+    }
+
+    /* tininess for underflow is detected before rounding */
+    set_float_detect_tininess(float_tininess_before_rounding,
+                              &env->fpu_status);
+
+    /* Reset state inside the kernel that we cannot access yet from QEMU. */
+    if (kvm_enabled()) {
+        kvm_s390_reset_vcpu(cpu);
+    }
+    tlb_flush(s, 1);
+}
+
+/* CPUClass::reset() */
+static void s390_cpu_full_reset(CPUState *s)
+{
+    S390CPU *cpu = S390_CPU(s);
+    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+    CPUS390XState *env = &cpu->env;
+    int i;
+
+    scc->parent_reset(s);
+    cpu->env.sigp_order = 0;
+    s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+
+    memset(env, 0, offsetof(CPUS390XState, cpu_num));
+
+    /* architectured initial values for CR 0 and 14 */
+    env->cregs[0] = CR0_RESET;
+    env->cregs[14] = CR14_RESET;
+
+    /* architectured initial value for Breaking-Event-Address register */
+    env->gbea = 1;
+
+    env->pfault_token = -1UL;
+    env->ext_index = -1;
+    for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
+        env->io_index[i] = -1;
+    }
+
+    /* tininess for underflow is detected before rounding */
+    set_float_detect_tininess(float_tininess_before_rounding,
+                              &env->fpu_status);
+
+    /* Reset state inside the kernel that we cannot access yet from QEMU. */
+    if (kvm_enabled()) {
+        kvm_s390_reset_vcpu(cpu);
+    }
+    tlb_flush(s, 1);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void s390_cpu_machine_reset_cb(void *opaque)
+{
+    S390CPU *cpu = opaque;
+
+    run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, CPU(cpu));
+}
+#endif
+
+static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+    CPUState *cs = CPU(dev);
+    S390CPUClass *scc = S390_CPU_GET_CLASS(dev);
+
+    s390_cpu_gdb_init(cs);
+    qemu_init_vcpu(cs);
+#if !defined(CONFIG_USER_ONLY)
+    run_on_cpu(cs, s390_do_cpu_full_reset, cs);
+#else
+    cpu_reset(cs);
+#endif
+
+    scc->parent_realize(dev, errp);
+}
+
+static void s390_cpu_initfn(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    S390CPU *cpu = S390_CPU(obj);
+    CPUS390XState *env = &cpu->env;
+    static bool inited;
+    static int cpu_num = 0;
+#if !defined(CONFIG_USER_ONLY)
+    struct tm tm;
+#endif
+
+    cs->env_ptr = env;
+    cpu_exec_init(cs, &error_abort);
+#if !defined(CONFIG_USER_ONLY)
+    qemu_register_reset(s390_cpu_machine_reset_cb, cpu);
+    qemu_get_timedate(&tm, 0);
+    env->tod_offset = TOD_UNIX_EPOCH +
+                      (time2tod(mktimegm(&tm)) * 1000000000ULL);
+    env->tod_basetime = 0;
+    env->tod_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
+    env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
+    s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+#endif
+    env->cpu_num = cpu_num++;
+
+    if (tcg_enabled() && !inited) {
+        inited = true;
+        s390x_translate_init();
+    }
+}
+
+static void s390_cpu_finalize(Object *obj)
+{
+#if !defined(CONFIG_USER_ONLY)
+    S390CPU *cpu = S390_CPU(obj);
+
+    qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu);
+    g_free(cpu->irqstate);
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
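+/* A cpu is in the disabled wait state when it is halted with I/O,
+ * external and machine-check interruptions all masked off in the PSW. */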
+static bool disabled_wait(CPUState *cpu)
+{
+    return cpu->halted && !(S390_CPU(cpu)->env.psw.mask &
+                            (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK));
+}
+
+static unsigned s390_count_running_cpus(void)
+{
+    CPUState *cpu;
+    int nr_running = 0;
+
+    CPU_FOREACH(cpu) {
+        uint8_t state = S390_CPU(cpu)->env.cpu_state;
+        if (state == CPU_STATE_OPERATING ||
+            state == CPU_STATE_LOAD) {
+            if (!disabled_wait(cpu)) {
+                nr_running++;
+            }
+        }
+    }
+
+    return nr_running;
+}
+
+unsigned int s390_cpu_halt(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    trace_cpu_halt(cs->cpu_index);
+
+    if (!cs->halted) {
+        cs->halted = 1;
+        cs->exception_index = EXCP_HLT;
+    }
+
+    return s390_count_running_cpus();
+}
+
+void s390_cpu_unhalt(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    trace_cpu_unhalt(cs->cpu_index);
+
+    if (cs->halted) {
+        cs->halted = 0;
+        cs->exception_index = -1;
+    }
+}
+
+unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
+{
+    trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state);
+
+    switch (cpu_state) {
+    case CPU_STATE_STOPPED:
+    case CPU_STATE_CHECK_STOP:
+        /* halt the cpu for common infrastructure */
+        s390_cpu_halt(cpu);
+        break;
+    case CPU_STATE_OPERATING:
+    case CPU_STATE_LOAD:
+        /* unhalt the cpu for common infrastructure */
+        s390_cpu_unhalt(cpu);
+        break;
+    default:
+        error_report("Requested CPU state is not a valid S390 CPU state: %u",
+                     cpu_state);
+        exit(1);
+    }
+    if (kvm_enabled() && cpu->env.cpu_state != cpu_state) {
+        kvm_s390_set_cpu_state(cpu, cpu_state);
+    }
+    cpu->env.cpu_state = cpu_state;
+
+    return s390_count_running_cpus();
+}
+#endif
+
+static void s390_cpu_class_init(ObjectClass *oc, void *data)
+{
+    S390CPUClass *scc = S390_CPU_CLASS(oc);
+    CPUClass *cc = CPU_CLASS(scc);
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    scc->parent_realize = dc->realize;
+    dc->realize = s390_cpu_realizefn;
+
+    scc->parent_reset = cc->reset;
+#if !defined(CONFIG_USER_ONLY)
+    scc->load_normal = s390_cpu_load_normal;
+#endif
+    scc->cpu_reset = s390_cpu_reset;
+    scc->initial_cpu_reset = s390_cpu_initial_reset;
+    cc->reset = s390_cpu_full_reset;
+    cc->has_work = s390_cpu_has_work;
+    cc->do_interrupt = s390_cpu_do_interrupt;
+    cc->dump_state = s390_cpu_dump_state;
+    cc->set_pc = s390_cpu_set_pc;
+    cc->gdb_read_register = s390_cpu_gdb_read_register;
+    cc->gdb_write_register = s390_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+    cc->handle_mmu_fault = s390_cpu_handle_mmu_fault;
+#else
+    cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
+    cc->vmsd = &vmstate_s390_cpu;
+    cc->write_elf64_note = s390_cpu_write_elf64_note;
+    cc->write_elf64_qemunote = s390_cpu_write_elf64_qemunote;
+    cc->cpu_exec_interrupt = s390_cpu_exec_interrupt;
+    cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
+#endif
+    cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
+    cc->gdb_core_xml_file = "s390x-core64.xml";
+}
+
+static const TypeInfo s390_cpu_type_info = {
+    .name = TYPE_S390_CPU,
+    .parent = TYPE_CPU,
+    .instance_size = sizeof(S390CPU),
+    .instance_init = s390_cpu_initfn,
+    .instance_finalize = s390_cpu_finalize,
+    .abstract = false,
+    .class_size = sizeof(S390CPUClass),
+    .class_init = s390_cpu_class_init,
+};
+
+static void s390_cpu_register_types(void)
+{
+    type_register_static(&s390_cpu_type_info);
+}
+
+type_init(s390_cpu_register_types)
diff --git a/qemu/target-s390x/cpu.h b/qemu/target-s390x/cpu.h
new file mode 100644
index 000000000..63aebf484
--- /dev/null
+++ b/qemu/target-s390x/cpu.h
@@ -0,0 +1,1281 @@
+/*
+ * S/390 virtual CPU header
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contributions after 2012-10-29 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU (Lesser) General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_S390X_H
+#define CPU_S390X_H
+
+#include "config.h"
+#include "qemu-common.h"
+
+#define TARGET_LONG_BITS 64
+
+#define ELF_MACHINE	EM_S390
+#define ELF_MACHINE_UNAME "S390X"
+
+#define CPUArchState struct CPUS390XState
+
+#include "exec/cpu-defs.h"
+#define TARGET_PAGE_BITS 12
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+
+#include "exec/cpu-all.h"
+
+#include "fpu/softfloat.h"
+
+#define NB_MMU_MODES 3
+
+#define MMU_MODE0_SUFFIX _primary
+#define MMU_MODE1_SUFFIX _secondary
+#define MMU_MODE2_SUFFIX _home
+
+#define MMU_USER_IDX 0
+
+#define MAX_EXT_QUEUE 16
+#define MAX_IO_QUEUE 16
+#define MAX_MCHK_QUEUE 16
+
+#define PSW_MCHK_MASK 0x0004000000000000ULL
+#define PSW_IO_MASK 0x0200000000000000ULL
+
+typedef struct PSW {
+    uint64_t mask;
+    uint64_t addr;
+} PSW;
+
+typedef struct ExtQueue {
+    uint32_t code;
+    uint32_t param;
+    uint32_t param64;
+} ExtQueue;
+
+typedef struct IOIntQueue {
+    uint16_t id;
+    uint16_t nr;
+    uint32_t parm;
+    uint32_t word;
+} IOIntQueue;
+
+typedef struct MchkQueue {
+    uint16_t type;
+} MchkQueue;
+
+typedef struct CPUS390XState {
+    uint64_t regs[16];     /* GP registers */
+    /*
+     * The floating point registers are part of the vector registers.
+     * vregs[0][0] -> vregs[15][0] are 16 floating point registers
+     */
+    CPU_DoubleU vregs[32][2];  /* vector registers */
+    uint32_t aregs[16];    /* access registers */
+
+    uint32_t fpc;          /* floating-point control register */
+    uint32_t cc_op;
+
+    float_status fpu_status; /* passed to softfloat lib */
+
+    /* The low part of a 128-bit return, or remainder of a divide.  */
+    uint64_t retxl;
+
+    PSW psw;
+
+    uint64_t cc_src;
+    uint64_t cc_dst;
+    uint64_t cc_vr;
+
+    uint64_t __excp_addr;
+    uint64_t psa;
+
+    uint32_t int_pgm_code;
+    uint32_t int_pgm_ilen;
+
+    uint32_t int_svc_code;
+    uint32_t int_svc_ilen;
+
+    uint64_t per_address;
+    uint16_t per_perc_atmid;
+
+    uint64_t cregs[16]; /* control registers */
+
+    ExtQueue ext_queue[MAX_EXT_QUEUE];
+    IOIntQueue io_queue[MAX_IO_QUEUE][8];
+    MchkQueue mchk_queue[MAX_MCHK_QUEUE];
+
+    int pending_int;
+    int ext_index;
+    int io_index[8];
+    int mchk_index;
+
+    uint64_t ckc;
+    uint64_t cputm;
+    uint32_t todpr;
+
+    uint64_t pfault_token;
+    uint64_t pfault_compare;
+    uint64_t pfault_select;
+
+    uint64_t gbea;
+    uint64_t pp;
+
+    CPU_COMMON
+
+    /* reset does memset(0) up to here */
+
+    uint32_t cpu_num;
+    uint32_t machine_type;
+
+    uint8_t *storage_keys;
+
+    uint64_t tod_offset;
+    uint64_t tod_basetime;
+    QEMUTimer *tod_timer;
+
+    QEMUTimer *cpu_timer;
+
+    /*
+     * The cpu state represents the logical state of a cpu. In contrast to other
+     * architectures, there is a difference between a halt and a stop on s390.
+     * If all cpus are either stopped (including check stop) or in the disabled
+     * wait state, the vm can be shut down.
+     */
+#define CPU_STATE_UNINITIALIZED        0x00
+#define CPU_STATE_STOPPED              0x01
+#define CPU_STATE_CHECK_STOP           0x02
+#define CPU_STATE_OPERATING            0x03
+#define CPU_STATE_LOAD                 0x04
+    uint8_t cpu_state;
+
+    /* currently processed sigp order */
+    uint8_t sigp_order;
+
+} CPUS390XState;
+
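+/* FPR n is element 0 (the high half) of vector register n. */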
+static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
+{
+    return &cs->vregs[nr][0];
+}
+
+#include "cpu-qom.h"
+#include "sysemu/kvm.h"
+
+/* distinguish between 24 bit and 31 bit addressing */
+#define HIGH_ORDER_BIT 0x80000000
+
+/* Interrupt Codes */
+/* Program Interrupts */
+#define PGM_OPERATION                   0x0001
+#define PGM_PRIVILEGED                  0x0002
+#define PGM_EXECUTE                     0x0003
+#define PGM_PROTECTION                  0x0004
+#define PGM_ADDRESSING                  0x0005
+#define PGM_SPECIFICATION               0x0006
+#define PGM_DATA                        0x0007
+#define PGM_FIXPT_OVERFLOW              0x0008
+#define PGM_FIXPT_DIVIDE                0x0009
+#define PGM_DEC_OVERFLOW                0x000a
+#define PGM_DEC_DIVIDE                  0x000b
+#define PGM_HFP_EXP_OVERFLOW            0x000c
+#define PGM_HFP_EXP_UNDERFLOW           0x000d
+#define PGM_HFP_SIGNIFICANCE            0x000e
+#define PGM_HFP_DIVIDE                  0x000f
+#define PGM_SEGMENT_TRANS               0x0010
+#define PGM_PAGE_TRANS                  0x0011
+#define PGM_TRANS_SPEC                  0x0012
+#define PGM_SPECIAL_OP                  0x0013
+#define PGM_OPERAND                     0x0015
+#define PGM_TRACE_TABLE                 0x0016
+#define PGM_SPACE_SWITCH                0x001c
+#define PGM_HFP_SQRT                    0x001d
+#define PGM_PC_TRANS_SPEC               0x001f
+#define PGM_AFX_TRANS                   0x0020
+#define PGM_ASX_TRANS                   0x0021
+#define PGM_LX_TRANS                    0x0022
+#define PGM_EX_TRANS                    0x0023
+#define PGM_PRIM_AUTH                   0x0024
+#define PGM_SEC_AUTH                    0x0025
+#define PGM_ALET_SPEC                   0x0028
+#define PGM_ALEN_SPEC                   0x0029
+#define PGM_ALE_SEQ                     0x002a
+#define PGM_ASTE_VALID                  0x002b
+#define PGM_ASTE_SEQ                    0x002c
+#define PGM_EXT_AUTH                    0x002d
+#define PGM_STACK_FULL                  0x0030
+#define PGM_STACK_EMPTY                 0x0031
+#define PGM_STACK_SPEC                  0x0032
+#define PGM_STACK_TYPE                  0x0033
+#define PGM_STACK_OP                    0x0034
+#define PGM_ASCE_TYPE                   0x0038
+#define PGM_REG_FIRST_TRANS             0x0039
+#define PGM_REG_SEC_TRANS               0x003a
+#define PGM_REG_THIRD_TRANS             0x003b
+#define PGM_MONITOR                     0x0040
+#define PGM_PER                         0x0080
+#define PGM_CRYPTO                      0x0119
+
+/* External Interrupts */
+#define EXT_INTERRUPT_KEY               0x0040
+#define EXT_CLOCK_COMP                  0x1004
+#define EXT_CPU_TIMER                   0x1005
+#define EXT_MALFUNCTION                 0x1200
+#define EXT_EMERGENCY                   0x1201
+#define EXT_EXTERNAL_CALL               0x1202
+#define EXT_ETR                         0x1406
+#define EXT_SERVICE                     0x2401
+#define EXT_VIRTIO                      0x2603
+
+/* PSW defines */
+#undef PSW_MASK_PER
+#undef PSW_MASK_DAT
+#undef PSW_MASK_IO
+#undef PSW_MASK_EXT
+#undef PSW_MASK_KEY
+#undef PSW_SHIFT_KEY
+#undef PSW_MASK_MCHECK
+#undef PSW_MASK_WAIT
+#undef PSW_MASK_PSTATE
+#undef PSW_MASK_ASC
+#undef PSW_MASK_CC
+#undef PSW_MASK_PM
+#undef PSW_MASK_64
+#undef PSW_MASK_32
+#undef PSW_MASK_ESA_ADDR
+
+#define PSW_MASK_PER            0x4000000000000000ULL
+#define PSW_MASK_DAT            0x0400000000000000ULL
+#define PSW_MASK_IO             0x0200000000000000ULL
+#define PSW_MASK_EXT            0x0100000000000000ULL
+#define PSW_MASK_KEY            0x00F0000000000000ULL
+#define PSW_SHIFT_KEY           56
+#define PSW_MASK_MCHECK         0x0004000000000000ULL
+#define PSW_MASK_WAIT           0x0002000000000000ULL
+#define PSW_MASK_PSTATE         0x0001000000000000ULL
+#define PSW_MASK_ASC            0x0000C00000000000ULL
+#define PSW_MASK_CC             0x0000300000000000ULL
+#define PSW_MASK_PM             0x00000F0000000000ULL
+#define PSW_MASK_64             0x0000000100000000ULL
+#define PSW_MASK_32             0x0000000080000000ULL
+#define PSW_MASK_ESA_ADDR       0x000000007fffffffULL
+
+#undef PSW_ASC_PRIMARY
+#undef PSW_ASC_ACCREG
+#undef PSW_ASC_SECONDARY
+#undef PSW_ASC_HOME
+
+#define PSW_ASC_PRIMARY         0x0000000000000000ULL
+#define PSW_ASC_ACCREG          0x0000400000000000ULL
+#define PSW_ASC_SECONDARY       0x0000800000000000ULL
+#define PSW_ASC_HOME            0x0000C00000000000ULL
+
+/* tb flags */
+
+#define FLAG_MASK_PER           (PSW_MASK_PER    >> 32)
+#define FLAG_MASK_DAT           (PSW_MASK_DAT    >> 32)
+#define FLAG_MASK_IO            (PSW_MASK_IO     >> 32)
+#define FLAG_MASK_EXT           (PSW_MASK_EXT    >> 32)
+#define FLAG_MASK_KEY           (PSW_MASK_KEY    >> 32)
+#define FLAG_MASK_MCHECK        (PSW_MASK_MCHECK >> 32)
+#define FLAG_MASK_WAIT          (PSW_MASK_WAIT   >> 32)
+#define FLAG_MASK_PSTATE        (PSW_MASK_PSTATE >> 32)
+#define FLAG_MASK_ASC           (PSW_MASK_ASC    >> 32)
+#define FLAG_MASK_CC            (PSW_MASK_CC     >> 32)
+#define FLAG_MASK_PM            (PSW_MASK_PM     >> 32)
+#define FLAG_MASK_64            (PSW_MASK_64     >> 32)
+#define FLAG_MASK_32            0x00001000
+
+/* Control register 0 bits */
+#define CR0_LOWPROT             0x0000000010000000ULL
+#define CR0_EDAT                0x0000000000800000ULL
+
+/* MMU */
+#define MMU_PRIMARY_IDX         0
+#define MMU_SECONDARY_IDX       1
+#define MMU_HOME_IDX            2
+
+static inline int cpu_mmu_index(CPUS390XState *env)
+{
+    switch (env->psw.mask & PSW_MASK_ASC) {
+    case PSW_ASC_PRIMARY:
+        return MMU_PRIMARY_IDX;
+    case PSW_ASC_SECONDARY:
+        return MMU_SECONDARY_IDX;
+    case PSW_ASC_HOME:
+        return MMU_HOME_IDX;
+    case PSW_ASC_ACCREG:
+        /* Fallthrough: access register mode is not yet supported */
+    default:
+        abort();
+    }
+}
+
+static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
+{
+    switch (mmu_idx) {
+    case MMU_PRIMARY_IDX:
+        return PSW_ASC_PRIMARY;
+    case MMU_SECONDARY_IDX:
+        return PSW_ASC_SECONDARY;
+    case MMU_HOME_IDX:
+        return PSW_ASC_HOME;
+    default:
+        abort();
+    }
+}
+
+static inline void cpu_get_tb_cpu_state(CPUS390XState *env, target_ulong *pc,
+                                        target_ulong *cs_base, int *flags)
+{
+    *pc = env->psw.addr;
+    *cs_base = 0;
+    *flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
+             ((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
+}
+
+/* While the PoO talks about ILC (a number between 1-3) what is actually
+   stored in LowCore is shifted left one bit (an even number between 2-6).
+   As this is the actual length of the insn and therefore more useful, that
+   is what we want to pass around and manipulate.  To make sure that we
+   have applied this distinction universally, rename the "ILC" to "ILEN".  */
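+/* The two high bits of the first opcode byte encode the length:
+   00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.  */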
+static inline int get_ilen(uint8_t opc)
+{
+    switch (opc >> 6) {
+    case 0:
+        return 2;
+    case 1:
+    case 2:
+        return 4;
+    default:
+        return 6;
+    }
+}
+
+/* PER bits from control register 9 */
+#define PER_CR9_EVENT_BRANCH           0x80000000
+#define PER_CR9_EVENT_IFETCH           0x40000000
+#define PER_CR9_EVENT_STORE            0x20000000
+#define PER_CR9_EVENT_STORE_REAL       0x08000000
+#define PER_CR9_EVENT_NULLIFICATION    0x01000000
+#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
+#define PER_CR9_CONTROL_ALTERATION     0x00200000
+
+/* PER bits from the PER CODE/ATMID/AI in lowcore */
+#define PER_CODE_EVENT_BRANCH          0x8000
+#define PER_CODE_EVENT_IFETCH          0x4000
+#define PER_CODE_EVENT_STORE           0x2000
+#define PER_CODE_EVENT_STORE_REAL      0x0800
+#define PER_CODE_EVENT_NULLIFICATION   0x0100
+
+/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
+   entry when a PER exception is triggered.  */
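+/* Bit 7: 64-bit addressing mode, bit 6: always set, bit 5: 31-bit mode,
+   bit 4: DAT enabled, bit 3: secondary ASC, bit 2: access-register ASC.  */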
+static inline uint8_t get_per_atmid(CPUS390XState *env)
+{
+    return ((env->psw.mask & PSW_MASK_64) ?       (1 << 7) : 0) |
+           (                                      (1 << 6)    ) |
+           ((env->psw.mask & PSW_MASK_32) ?       (1 << 5) : 0) |
+           ((env->psw.mask & PSW_MASK_DAT) ?      (1 << 4) : 0) |
+           ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
+           ((env->psw.mask & PSW_ASC_ACCREG) ?    (1 << 2) : 0);
+}
+
+/* Check if an address is within the PER starting address and the PER
+   ending address.  The address range might loop.  */
+static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
+{
+    if (env->cregs[10] <= env->cregs[11]) {
+        return env->cregs[10] <= addr && addr <= env->cregs[11];
+    } else {
+        return env->cregs[10] <= addr || addr <= env->cregs[11];
+    }
+}
+
+#ifndef CONFIG_USER_ONLY
+/* In several cases of runtime exceptions, we haven't recorded the true
+   instruction length.  Use these codes when raising exceptions in order
+   to re-compute the length by examining the insn in memory.  */
+#define ILEN_LATER       0x20
+#define ILEN_LATER_INC   0x21
+void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
+#endif
+
+S390CPU *cpu_s390x_init(const char *cpu_model);
+void s390x_translate_init(void);
+int cpu_s390x_exec(CPUState *cpu);
+
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+   signal handlers to inform the virtual CPU of exceptions.  Non-zero
+   is returned if the signal was handled by the virtual CPU.  */
+int cpu_s390x_signal_handler(int host_signum, void *pinfo,
+                             void *puc);
+int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+                              int mmu_idx);
+
+#include "ioinst.h"
+
+
+#ifndef CONFIG_USER_ONLY
+void do_restart_interrupt(CPUS390XState *env);
+
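+/*
+ * Decode the base/displacement operand from an intercepted instruction's
+ * IPB field: the top four bits name the base register (0 means no base)
+ * and bits 27-16 hold the 12-bit displacement.
+ */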
+static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
+                                       uint8_t *ar)
+{
+    hwaddr addr = 0;
+    uint8_t reg;
+
+    reg = ipb >> 28;
+    if (reg > 0) {
+        addr = env->regs[reg];
+    }
+    addr += (ipb >> 16) & 0xfff;
+    if (ar) {
+        *ar = reg;
+    }
+
+    return addr;
+}
+
+/* Base/displacement are at the same locations. */
+#define decode_basedisp_rs decode_basedisp_s
+
+/* helper functions for run_on_cpu() */
+static inline void s390_do_cpu_reset(void *arg)
+{
+    CPUState *cs = arg;
+    S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+    scc->cpu_reset(cs);
+}
+static inline void s390_do_cpu_full_reset(void *arg)
+{
+    CPUState *cs = arg;
+
+    cpu_reset(cs);
+}
+
+void s390x_tod_timer(void *opaque);
+void s390x_cpu_timer(void *opaque);
+
+int s390_virtio_hypercall(CPUS390XState *env);
+void s390_virtio_irq(int config_change, uint64_t token);
+
+#ifdef CONFIG_KVM
+void kvm_s390_virtio_irq(int config_change, uint64_t token);
+void kvm_s390_service_interrupt(uint32_t parm);
+void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
+void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
+int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
+void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+                    int len, bool is_write);
+int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
+int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
+#else
+static inline void kvm_s390_virtio_irq(int config_change, uint64_t token)
+{
+}
+static inline void kvm_s390_service_interrupt(uint32_t parm)
+{
+}
+static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+    return -ENOSYS;
+}
+static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+    return -ENOSYS;
+}
+static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
+                                  void *hostbuf, int len, bool is_write)
+{
+    return -ENOSYS;
+}
+static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
+                                             uint64_t te_code)
+{
+}
+#endif
+
+static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+    if (kvm_enabled()) {
+        return kvm_s390_get_clock(tod_high, tod_low);
+    }
+    /* FIXME: TCG */
+    *tod_high = 0;
+    *tod_low = 0;
+    return 0;
+}
+
+static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+    if (kvm_enabled()) {
+        return kvm_s390_set_clock(tod_high, tod_low);
+    }
+    /* FIXME: TCG */
+    return 0;
+}
+
+S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
+unsigned int s390_cpu_halt(S390CPU *cpu);
+void s390_cpu_unhalt(S390CPU *cpu);
+unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
+static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
+{
+    return cpu->env.cpu_state;
+}
+
+void gtod_save(QEMUFile *f, void *opaque);
+int gtod_load(QEMUFile *f, void *opaque, int version_id);
+
+/* service interrupts are floating, therefore we must not pass a cpustate */
+void s390_sclp_extint(uint32_t parm);
+
+/* from s390-virtio-bus */
+extern const hwaddr virtio_size;
+
+#else
+static inline unsigned int s390_cpu_halt(S390CPU *cpu)
+{
+    return 0;
+}
+
+static inline void s390_cpu_unhalt(S390CPU *cpu)
+{
+}
+
+static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
+{
+    return 0;
+}
+#endif
+void cpu_lock(void);
+void cpu_unlock(void);
+
+typedef struct SubchDev SubchDev;
+
+#ifndef CONFIG_USER_ONLY
+extern void io_subsystem_reset(void);
+SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
+                         uint16_t schid);
+bool css_subch_visible(SubchDev *sch);
+void css_conditional_io_interrupt(SubchDev *sch);
+int css_do_stsch(SubchDev *sch, SCHIB *schib);
+bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid);
+int css_do_msch(SubchDev *sch, const SCHIB *schib);
+int css_do_xsch(SubchDev *sch);
+int css_do_csch(SubchDev *sch);
+int css_do_hsch(SubchDev *sch);
+int css_do_ssch(SubchDev *sch, ORB *orb);
+int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
+void css_do_tsch_update_subch(SubchDev *sch);
+int css_do_stcrw(CRW *crw);
+void css_undo_stcrw(CRW *crw);
+int css_do_tpi(IOIntCode *int_code, int lowcore);
+int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
+                         int rfmt, void *buf);
+void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo);
+int css_enable_mcsse(void);
+int css_enable_mss(void);
+int css_do_rsch(SubchDev *sch);
+int css_do_rchp(uint8_t cssid, uint8_t chpid);
+bool css_present(uint8_t cssid);
+#endif
+
+#define cpu_init(model) CPU(cpu_s390x_init(model))
+#define cpu_exec cpu_s390x_exec
+#define cpu_gen_code cpu_s390x_gen_code
+#define cpu_signal_handler cpu_s390x_signal_handler
+
+void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+#define cpu_list s390_cpu_list
+
+#include "exec/exec-all.h"
+
+#define EXCP_EXT 1 /* external interrupt */
+#define EXCP_SVC 2 /* supervisor call (syscall) */
+#define EXCP_PGM 3 /* program interruption */
+#define EXCP_IO  7 /* I/O interrupt */
+#define EXCP_MCHK 8 /* machine check */
+
+#define INTERRUPT_EXT        (1 << 0)
+#define INTERRUPT_TOD        (1 << 1)
+#define INTERRUPT_CPUTIMER   (1 << 2)
+#define INTERRUPT_IO         (1 << 3)
+#define INTERRUPT_MCHK       (1 << 4)
+
+/* Program Status Word.  */
+#define S390_PSWM_REGNUM 0
+#define S390_PSWA_REGNUM 1
+/* General Purpose Registers.  */
+#define S390_R0_REGNUM 2
+#define S390_R1_REGNUM 3
+#define S390_R2_REGNUM 4
+#define S390_R3_REGNUM 5
+#define S390_R4_REGNUM 6
+#define S390_R5_REGNUM 7
+#define S390_R6_REGNUM 8
+#define S390_R7_REGNUM 9
+#define S390_R8_REGNUM 10
+#define S390_R9_REGNUM 11
+#define S390_R10_REGNUM 12
+#define S390_R11_REGNUM 13
+#define S390_R12_REGNUM 14
+#define S390_R13_REGNUM 15
+#define S390_R14_REGNUM 16
+#define S390_R15_REGNUM 17
+/* Total Core Registers. */
+#define S390_NUM_CORE_REGS 18
+
+/* CC optimization */
+
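+/* The translator does not materialise the condition code after every
+   instruction: env->cc_op records how the CC can be derived from
+   env->cc_src/cc_dst/cc_vr, and calc_cc() computes it on demand, e.g.
+   CC_OP_LTGT_32 stands for a still-pending signed 32-bit compare of
+   cc_src against cc_dst.  */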
+enum cc_op {
+    CC_OP_CONST0 = 0,           /* CC is 0 */
+    CC_OP_CONST1,               /* CC is 1 */
+    CC_OP_CONST2,               /* CC is 2 */
+    CC_OP_CONST3,               /* CC is 3 */
+
+    CC_OP_DYNAMIC,              /* CC calculation defined by env->cc_op */
+    CC_OP_STATIC,               /* CC value is env->cc_op */
+
+    CC_OP_NZ,                   /* env->cc_dst != 0 */
+    CC_OP_LTGT_32,              /* signed less/greater than (32bit) */
+    CC_OP_LTGT_64,              /* signed less/greater than (64bit) */
+    CC_OP_LTUGTU_32,            /* unsigned less/greater than (32bit) */
+    CC_OP_LTUGTU_64,            /* unsigned less/greater than (64bit) */
+    CC_OP_LTGT0_32,             /* signed less/greater than 0 (32bit) */
+    CC_OP_LTGT0_64,             /* signed less/greater than 0 (64bit) */
+
+    CC_OP_ADD_64,               /* overflow on add (64bit) */
+    CC_OP_ADDU_64,              /* overflow on unsigned add (64bit) */
+    CC_OP_ADDC_64,              /* overflow on unsigned add-carry (64bit) */
+    CC_OP_SUB_64,               /* overflow on subtraction (64bit) */
+    CC_OP_SUBU_64,              /* overflow on unsigned subtraction (64bit) */
+    CC_OP_SUBB_64,              /* overflow on unsigned sub-borrow (64bit) */
+    CC_OP_ABS_64,               /* sign eval on abs (64bit) */
+    CC_OP_NABS_64,              /* sign eval on nabs (64bit) */
+
+    CC_OP_ADD_32,               /* overflow on add (32bit) */
+    CC_OP_ADDU_32,              /* overflow on unsigned add (32bit) */
+    CC_OP_ADDC_32,              /* overflow on unsigned add-carry (32bit) */
+    CC_OP_SUB_32,               /* overflow on subtraction (32bit) */
+    CC_OP_SUBU_32,              /* overflow on unsigned subtraction (32bit) */
+    CC_OP_SUBB_32,              /* overflow on unsigned sub-borrow (32bit) */
+    CC_OP_ABS_32,               /* sign eval on abs (32bit) */
+    CC_OP_NABS_32,              /* sign eval on nabs (32bit) */
+
+    CC_OP_COMP_32,              /* complement */
+    CC_OP_COMP_64,              /* complement */
+
+    CC_OP_TM_32,                /* test under mask (32bit) */
+    CC_OP_TM_64,                /* test under mask (64bit) */
+
+    CC_OP_NZ_F32,               /* FP dst != 0 (32bit) */
+    CC_OP_NZ_F64,               /* FP dst != 0 (64bit) */
+    CC_OP_NZ_F128,              /* FP dst != 0 (128bit) */
+
+    CC_OP_ICM,                  /* insert characters under mask */
+    CC_OP_SLA_32,               /* Calculate shift left signed (32bit) */
+    CC_OP_SLA_64,               /* Calculate shift left signed (64bit) */
+    CC_OP_FLOGR,                /* find leftmost one */
+    CC_OP_MAX
+};
+
+static const char *cc_names[] = {
+    [CC_OP_CONST0]    = "CC_OP_CONST0",
+    [CC_OP_CONST1]    = "CC_OP_CONST1",
+    [CC_OP_CONST2]    = "CC_OP_CONST2",
+    [CC_OP_CONST3]    = "CC_OP_CONST3",
+    [CC_OP_DYNAMIC]   = "CC_OP_DYNAMIC",
+    [CC_OP_STATIC]    = "CC_OP_STATIC",
+    [CC_OP_NZ]        = "CC_OP_NZ",
+    [CC_OP_LTGT_32]   = "CC_OP_LTGT_32",
+    [CC_OP_LTGT_64]   = "CC_OP_LTGT_64",
+    [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
+    [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
+    [CC_OP_LTGT0_32]  = "CC_OP_LTGT0_32",
+    [CC_OP_LTGT0_64]  = "CC_OP_LTGT0_64",
+    [CC_OP_ADD_64]    = "CC_OP_ADD_64",
+    [CC_OP_ADDU_64]   = "CC_OP_ADDU_64",
+    [CC_OP_ADDC_64]   = "CC_OP_ADDC_64",
+    [CC_OP_SUB_64]    = "CC_OP_SUB_64",
+    [CC_OP_SUBU_64]   = "CC_OP_SUBU_64",
+    [CC_OP_SUBB_64]   = "CC_OP_SUBB_64",
+    [CC_OP_ABS_64]    = "CC_OP_ABS_64",
+    [CC_OP_NABS_64]   = "CC_OP_NABS_64",
+    [CC_OP_ADD_32]    = "CC_OP_ADD_32",
+    [CC_OP_ADDU_32]   = "CC_OP_ADDU_32",
+    [CC_OP_ADDC_32]   = "CC_OP_ADDC_32",
+    [CC_OP_SUB_32]    = "CC_OP_SUB_32",
+    [CC_OP_SUBU_32]   = "CC_OP_SUBU_32",
+    [CC_OP_SUBB_32]   = "CC_OP_SUBB_32",
+    [CC_OP_ABS_32]    = "CC_OP_ABS_32",
+    [CC_OP_NABS_32]   = "CC_OP_NABS_32",
+    [CC_OP_COMP_32]   = "CC_OP_COMP_32",
+    [CC_OP_COMP_64]   = "CC_OP_COMP_64",
+    [CC_OP_TM_32]     = "CC_OP_TM_32",
+    [CC_OP_TM_64]     = "CC_OP_TM_64",
+    [CC_OP_NZ_F32]    = "CC_OP_NZ_F32",
+    [CC_OP_NZ_F64]    = "CC_OP_NZ_F64",
+    [CC_OP_NZ_F128]   = "CC_OP_NZ_F128",
+    [CC_OP_ICM]       = "CC_OP_ICM",
+    [CC_OP_SLA_32]    = "CC_OP_SLA_32",
+    [CC_OP_SLA_64]    = "CC_OP_SLA_64",
+    [CC_OP_FLOGR]     = "CC_OP_FLOGR",
+};
+
+static inline const char *cc_name(int cc_op)
+{
+    return cc_names[cc_op];
+}
+
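+/* The condition code occupies PSW mask bits 18-19, i.e. bits 45:44 when
+   counting from the least significant end of the 64-bit mask; reading it
+   back is simply (env->psw.mask >> 44) & 3.  */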
+static inline void setcc(S390CPU *cpu, uint64_t cc)
+{
+    CPUS390XState *env = &cpu->env;
+
+    env->psw.mask &= ~(3ull << 44);
+    env->psw.mask |= (cc & 3) << 44;
+    env->cc_op = cc;
+}
+
+typedef struct LowCore
+{
+    /* prefix area: defined by architecture */
+    uint32_t        ccw1[2];                  /* 0x000 */
+    uint32_t        ccw2[4];                  /* 0x008 */
+    uint8_t         pad1[0x80-0x18];          /* 0x018 */
+    uint32_t        ext_params;               /* 0x080 */
+    uint16_t        cpu_addr;                 /* 0x084 */
+    uint16_t        ext_int_code;             /* 0x086 */
+    uint16_t        svc_ilen;                 /* 0x088 */
+    uint16_t        svc_code;                 /* 0x08a */
+    uint16_t        pgm_ilen;                 /* 0x08c */
+    uint16_t        pgm_code;                 /* 0x08e */
+    uint32_t        data_exc_code;            /* 0x090 */
+    uint16_t        mon_class_num;            /* 0x094 */
+    uint16_t        per_perc_atmid;           /* 0x096 */
+    uint64_t        per_address;              /* 0x098 */
+    uint8_t         exc_access_id;            /* 0x0a0 */
+    uint8_t         per_access_id;            /* 0x0a1 */
+    uint8_t         op_access_id;             /* 0x0a2 */
+    uint8_t         ar_access_id;             /* 0x0a3 */
+    uint8_t         pad2[0xa8-0xa4];          /* 0x0a4 */
+    uint64_t        trans_exc_code;           /* 0x0a8 */
+    uint64_t        monitor_code;             /* 0x0b0 */
+    uint16_t        subchannel_id;            /* 0x0b8 */
+    uint16_t        subchannel_nr;            /* 0x0ba */
+    uint32_t        io_int_parm;              /* 0x0bc */
+    uint32_t        io_int_word;              /* 0x0c0 */
+    uint8_t         pad3[0xc8-0xc4];          /* 0x0c4 */
+    uint32_t        stfl_fac_list;            /* 0x0c8 */
+    uint8_t         pad4[0xe8-0xcc];          /* 0x0cc */
+    uint32_t        mcck_interruption_code[2]; /* 0x0e8 */
+    uint8_t         pad5[0xf4-0xf0];          /* 0x0f0 */
+    uint32_t        external_damage_code;     /* 0x0f4 */
+    uint64_t        failing_storage_address;  /* 0x0f8 */
+    uint8_t         pad6[0x110-0x100];        /* 0x100 */
+    uint64_t        per_breaking_event_addr;  /* 0x110 */
+    uint8_t         pad7[0x120-0x118];        /* 0x118 */
+    PSW             restart_old_psw;          /* 0x120 */
+    PSW             external_old_psw;         /* 0x130 */
+    PSW             svc_old_psw;              /* 0x140 */
+    PSW             program_old_psw;          /* 0x150 */
+    PSW             mcck_old_psw;             /* 0x160 */
+    PSW             io_old_psw;               /* 0x170 */
+    uint8_t         pad8[0x1a0-0x180];        /* 0x180 */
+    PSW             restart_new_psw;          /* 0x1a0 */
+    PSW             external_new_psw;         /* 0x1b0 */
+    PSW             svc_new_psw;              /* 0x1c0 */
+    PSW             program_new_psw;          /* 0x1d0 */
+    PSW             mcck_new_psw;             /* 0x1e0 */
+    PSW             io_new_psw;               /* 0x1f0 */
+    PSW             return_psw;               /* 0x200 */
+    uint8_t         irb[64];                  /* 0x210 */
+    uint64_t        sync_enter_timer;         /* 0x250 */
+    uint64_t        async_enter_timer;        /* 0x258 */
+    uint64_t        exit_timer;               /* 0x260 */
+    uint64_t        last_update_timer;        /* 0x268 */
+    uint64_t        user_timer;               /* 0x270 */
+    uint64_t        system_timer;             /* 0x278 */
+    uint64_t        last_update_clock;        /* 0x280 */
+    uint64_t        steal_clock;              /* 0x288 */
+    PSW             return_mcck_psw;          /* 0x290 */
+    uint8_t         pad9[0xc00-0x2a0];        /* 0x2a0 */
+    /* System info area */
+    uint64_t        save_area[16];            /* 0xc00 */
+    uint8_t         pad10[0xd40-0xc80];       /* 0xc80 */
+    uint64_t        kernel_stack;             /* 0xd40 */
+    uint64_t        thread_info;              /* 0xd48 */
+    uint64_t        async_stack;              /* 0xd50 */
+    uint64_t        kernel_asce;              /* 0xd58 */
+    uint64_t        user_asce;                /* 0xd60 */
+    uint64_t        panic_stack;              /* 0xd68 */
+    uint64_t        user_exec_asce;           /* 0xd70 */
+    uint8_t         pad11[0xdc0-0xd78];       /* 0xd78 */
+
+    /* SMP info area: defined by DJB */
+    uint64_t        clock_comparator;         /* 0xdc0 */
+    uint64_t        ext_call_fast;            /* 0xdc8 */
+    uint64_t        percpu_offset;            /* 0xdd0 */
+    uint64_t        current_task;             /* 0xdd8 */
+    uint32_t        softirq_pending;          /* 0xde0 */
+    uint32_t        pad_0x0de4;               /* 0xde4 */
+    uint64_t        int_clock;                /* 0xde8 */
+    uint8_t         pad12[0xe00-0xdf0];       /* 0xdf0 */
+
+    /* 0xe00 is used as an indicator for dump tools,
+       telling whether the kernel died with panic() or not */
+    uint32_t        panic_magic;              /* 0xe00 */
+
+    uint8_t         pad13[0x11b8-0xe04];      /* 0xe04 */
+
+    /* 64-bit extparam used for pfault, diag 250, etc. */
+    uint64_t        ext_params2;               /* 0x11b8 */
+
+    uint8_t         pad14[0x1200-0x11c0];      /* 0x11c0 */
+
+    /* System info area */
+
+    uint64_t        floating_pt_save_area[16]; /* 0x1200 */
+    uint64_t        gpregs_save_area[16];      /* 0x1280 */
+    uint32_t        st_status_fixed_logout[4]; /* 0x1300 */
+    uint8_t         pad15[0x1318-0x1310];      /* 0x1310 */
+    uint32_t        prefixreg_save_area;       /* 0x1318 */
+    uint32_t        fpt_creg_save_area;        /* 0x131c */
+    uint8_t         pad16[0x1324-0x1320];      /* 0x1320 */
+    uint32_t        tod_progreg_save_area;     /* 0x1324 */
+    uint32_t        cpu_timer_save_area[2];    /* 0x1328 */
+    uint32_t        clock_comp_save_area[2];   /* 0x1330 */
+    uint8_t         pad17[0x1340-0x1338];      /* 0x1338 */
+    uint32_t        access_regs_save_area[16]; /* 0x1340 */
+    uint64_t        cregs_save_area[16];       /* 0x1380 */
+
+    /* align to the top of the prefix area */
+
+    uint8_t         pad18[0x2000-0x1400];      /* 0x1400 */
+} QEMU_PACKED LowCore;
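+/* The layout above mirrors the architected 8KB prefix area, the low real
+   storage that each CPU accesses relocated through its prefix register,
+   which is why the structure is QEMU_PACKED and padded to exactly
+   0x2000 bytes.  */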
+
+/* STSI */
+#define STSI_LEVEL_MASK         0x00000000f0000000ULL
+#define STSI_LEVEL_CURRENT      0x0000000000000000ULL
+#define STSI_LEVEL_1            0x0000000010000000ULL
+#define STSI_LEVEL_2            0x0000000020000000ULL
+#define STSI_LEVEL_3            0x0000000030000000ULL
+#define STSI_R0_RESERVED_MASK   0x000000000fffff00ULL
+#define STSI_R0_SEL1_MASK       0x00000000000000ffULL
+#define STSI_R1_RESERVED_MASK   0x00000000ffff0000ULL
+#define STSI_R1_SEL2_MASK       0x000000000000ffffULL
+
+/* Basic Machine Configuration */
+struct sysib_111 {
+    uint32_t res1[8];
+    uint8_t  manuf[16];
+    uint8_t  type[4];
+    uint8_t  res2[12];
+    uint8_t  model[16];
+    uint8_t  sequence[16];
+    uint8_t  plant[4];
+    uint8_t  res3[156];
+};
+
+/* Basic Machine CPU */
+struct sysib_121 {
+    uint32_t res1[80];
+    uint8_t  sequence[16];
+    uint8_t  plant[4];
+    uint8_t  res2[2];
+    uint16_t cpu_addr;
+    uint8_t  res3[152];
+};
+
+/* Basic Machine CPUs */
+struct sysib_122 {
+    uint8_t res1[32];
+    uint32_t capability;
+    uint16_t total_cpus;
+    uint16_t active_cpus;
+    uint16_t standby_cpus;
+    uint16_t reserved_cpus;
+    uint16_t adjustments[2026];
+};
+
+/* LPAR CPU */
+struct sysib_221 {
+    uint32_t res1[80];
+    uint8_t  sequence[16];
+    uint8_t  plant[4];
+    uint16_t cpu_id;
+    uint16_t cpu_addr;
+    uint8_t  res3[152];
+};
+
+/* LPAR CPUs */
+struct sysib_222 {
+    uint32_t res1[32];
+    uint16_t lpar_num;
+    uint8_t  res2;
+    uint8_t  lcpuc;
+    uint16_t total_cpus;
+    uint16_t conf_cpus;
+    uint16_t standby_cpus;
+    uint16_t reserved_cpus;
+    uint8_t  name[8];
+    uint32_t caf;
+    uint8_t  res3[16];
+    uint16_t dedicated_cpus;
+    uint16_t shared_cpus;
+    uint8_t  res4[180];
+};
+
+/* VM CPUs */
+struct sysib_322 {
+    uint8_t  res1[31];
+    uint8_t  count;
+    struct {
+        uint8_t  res2[4];
+        uint16_t total_cpus;
+        uint16_t conf_cpus;
+        uint16_t standby_cpus;
+        uint16_t reserved_cpus;
+        uint8_t  name[8];
+        uint32_t caf;
+        uint8_t  cpi[16];
+        uint8_t  res5[3];
+        uint8_t  ext_name_encoding;
+        uint32_t res3;
+        uint8_t  uuid[16];
+    } vm[8];
+    uint8_t  res4[1504];
+    uint8_t  ext_names[8][256];
+};
+
+/* MMU defines */
+#define _ASCE_ORIGIN            ~0xfffULL /* segment table origin             */
+#define _ASCE_SUBSPACE          0x200     /* subspace group control           */
+#define _ASCE_PRIVATE_SPACE     0x100     /* private space control            */
+#define _ASCE_ALT_EVENT         0x80      /* storage alteration event control */
+#define _ASCE_SPACE_SWITCH      0x40      /* space switch event               */
+#define _ASCE_REAL_SPACE        0x20      /* real space control               */
+#define _ASCE_TYPE_MASK         0x0c      /* asce table type mask             */
+#define _ASCE_TYPE_REGION1      0x0c      /* region first table type          */
+#define _ASCE_TYPE_REGION2      0x08      /* region second table type         */
+#define _ASCE_TYPE_REGION3      0x04      /* region third table type          */
+#define _ASCE_TYPE_SEGMENT      0x00      /* segment table type               */
+#define _ASCE_TABLE_LENGTH      0x03      /* region table length              */
+
+#define _REGION_ENTRY_ORIGIN    ~0xfffULL /* region/segment table origin      */
+#define _REGION_ENTRY_RO        0x200     /* region/segment protection bit    */
+#define _REGION_ENTRY_TF        0xc0      /* region/segment table offset      */
+#define _REGION_ENTRY_INV       0x20      /* invalid region table entry       */
+#define _REGION_ENTRY_TYPE_MASK 0x0c      /* region/segment table type mask   */
+#define _REGION_ENTRY_TYPE_R1   0x0c      /* region first table type          */
+#define _REGION_ENTRY_TYPE_R2   0x08      /* region second table type         */
+#define _REGION_ENTRY_TYPE_R3   0x04      /* region third table type          */
+#define _REGION_ENTRY_LENGTH    0x03      /* region third length              */
+
+#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffULL /* segment table origin             */
+#define _SEGMENT_ENTRY_FC       0x400     /* format control                   */
+#define _SEGMENT_ENTRY_RO       0x200     /* page protection bit              */
+#define _SEGMENT_ENTRY_INV      0x20      /* invalid segment table entry      */
+
+#define _PAGE_RO        0x200            /* HW read-only bit  */
+#define _PAGE_INVALID   0x400            /* HW invalid bit    */
+#define _PAGE_RES0      0x800            /* bit must be zero  */
+
+#define SK_C                    (0x1 << 1)
+#define SK_R                    (0x1 << 2)
+#define SK_F                    (0x1 << 3)
+#define SK_ACC_MASK             (0xf << 4)
+
+/* SIGP order codes */
+#define SIGP_SENSE             0x01
+#define SIGP_EXTERNAL_CALL     0x02
+#define SIGP_EMERGENCY         0x03
+#define SIGP_START             0x04
+#define SIGP_STOP              0x05
+#define SIGP_RESTART           0x06
+#define SIGP_STOP_STORE_STATUS 0x09
+#define SIGP_INITIAL_CPU_RESET 0x0b
+#define SIGP_CPU_RESET         0x0c
+#define SIGP_SET_PREFIX        0x0d
+#define SIGP_STORE_STATUS_ADDR 0x0e
+#define SIGP_SET_ARCH          0x12
+#define SIGP_STORE_ADTL_STATUS 0x17
+
+/* SIGP condition codes */
+#define SIGP_CC_ORDER_CODE_ACCEPTED 0
+#define SIGP_CC_STATUS_STORED       1
+#define SIGP_CC_BUSY                2
+#define SIGP_CC_NOT_OPERATIONAL     3
+
+/* SIGP status bits */
+#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
+#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
+#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
+#define SIGP_STAT_STOPPED           0x00000040UL
+#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
+#define SIGP_STAT_CHECK_STOP        0x00000010UL
+#define SIGP_STAT_INOPERATIVE       0x00000004UL
+#define SIGP_STAT_INVALID_ORDER     0x00000002UL
+#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL
+
+/* SIGP SET ARCHITECTURE modes */
+#define SIGP_MODE_ESA_S390 0
+#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
+#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2
+
+void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
+int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
+                  target_ulong *raddr, int *flags, bool exc);
+int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
+uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+                 uint64_t vr);
+void s390_cpu_recompute_watchpoints(CPUState *cs);
+
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+                         int len, bool is_write);
+
+#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len)    \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
+#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len)       \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
+#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len)   \
+        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
+
+/* The value of the TOD clock for 1.1.1970. */
+#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+
+/* Converts ns to s390's clock format */
+static inline uint64_t time2tod(uint64_t ns)
+{
+    return (ns << 9) / 125;
+}
+
+/* Converts s390's clock format to ns */
+static inline uint64_t tod2time(uint64_t t)
+{
+    return (t * 125) >> 9;
+}
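+/* In the TOD format, bit 51 (counting from the most significant bit)
+   increments once per microsecond, so one TOD unit is 2**-12 us and
+   time2tod() is a multiplication by 4.096; e.g. time2tod(1000) == 0x1000,
+   i.e. 1000 ns == 1 us == 2**12 TOD units.  TOD_UNIX_EPOCH above is
+   time2tod() applied to the 2208988800 seconds between the TOD epoch
+   (1900-01-01) and the Unix epoch.  */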
+
+static inline void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
+                                  uint64_t param64)
+{
+    CPUS390XState *env = &cpu->env;
+
+    if (env->ext_index == MAX_EXT_QUEUE - 1) {
+        /* ugh - can't queue anymore. Let's drop. */
+        return;
+    }
+
+    env->ext_index++;
+    assert(env->ext_index < MAX_EXT_QUEUE);
+
+    env->ext_queue[env->ext_index].code = code;
+    env->ext_queue[env->ext_index].param = param;
+    env->ext_queue[env->ext_index].param64 = param64;
+
+    env->pending_int |= INTERRUPT_EXT;
+    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+static inline void cpu_inject_io(S390CPU *cpu, uint16_t subchannel_id,
+                                 uint16_t subchannel_number,
+                                 uint32_t io_int_parm, uint32_t io_int_word)
+{
+    CPUS390XState *env = &cpu->env;
+    int isc = IO_INT_WORD_ISC(io_int_word);
+
+    if (env->io_index[isc] == MAX_IO_QUEUE - 1) {
+        /* ugh - can't queue anymore. Let's drop. */
+        return;
+    }
+
+    env->io_index[isc]++;
+    assert(env->io_index[isc] < MAX_IO_QUEUE);
+
+    env->io_queue[env->io_index[isc]][isc].id = subchannel_id;
+    env->io_queue[env->io_index[isc]][isc].nr = subchannel_number;
+    env->io_queue[env->io_index[isc]][isc].parm = io_int_parm;
+    env->io_queue[env->io_index[isc]][isc].word = io_int_word;
+
+    env->pending_int |= INTERRUPT_IO;
+    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+static inline void cpu_inject_crw_mchk(S390CPU *cpu)
+{
+    CPUS390XState *env = &cpu->env;
+
+    if (env->mchk_index == MAX_MCHK_QUEUE - 1) {
+        /* ugh - can't queue anymore. Let's drop. */
+        return;
+    }
+
+    env->mchk_index++;
+    assert(env->mchk_index < MAX_MCHK_QUEUE);
+
+    env->mchk_queue[env->mchk_index].type = 1;
+
+    env->pending_int |= INTERRUPT_MCHK;
+    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+/* from s390-virtio-ccw */
+#define MEM_SECTION_SIZE             0x10000000UL
+#define MAX_AVAIL_SLOTS              32
+
+/* fpu_helper.c */
+uint32_t set_cc_nz_f32(float32 v);
+uint32_t set_cc_nz_f64(float64 v);
+uint32_t set_cc_nz_f128(float128 v);
+
+/* misc_helper.c */
+#ifndef CONFIG_USER_ONLY
+int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
+#endif
+void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
+void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
+                                     uintptr_t retaddr);
+
+#ifdef CONFIG_KVM
+void kvm_s390_io_interrupt(uint16_t subchannel_id,
+                           uint16_t subchannel_nr, uint32_t io_int_parm,
+                           uint32_t io_int_word);
+void kvm_s390_crw_mchk(void);
+void kvm_s390_enable_css_support(S390CPU *cpu);
+int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
+                                    int vq, bool assign);
+int kvm_s390_cpu_restart(S390CPU *cpu);
+int kvm_s390_get_memslot_count(KVMState *s);
+void kvm_s390_clear_cmma_callback(void *opaque);
+int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
+void kvm_s390_reset_vcpu(S390CPU *cpu);
+int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit);
+void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
+int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
+#else
+static inline void kvm_s390_io_interrupt(uint16_t subchannel_id,
+                                        uint16_t subchannel_nr,
+                                        uint32_t io_int_parm,
+                                        uint32_t io_int_word)
+{
+}
+static inline void kvm_s390_crw_mchk(void)
+{
+}
+static inline void kvm_s390_enable_css_support(S390CPU *cpu)
+{
+}
+static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
+                                                  uint32_t sch, int vq,
+                                                  bool assign)
+{
+    return -ENOSYS;
+}
+static inline int kvm_s390_cpu_restart(S390CPU *cpu)
+{
+    return -ENOSYS;
+}
+static inline void kvm_s390_clear_cmma_callback(void *opaque)
+{
+}
+static inline int kvm_s390_get_memslot_count(KVMState *s)
+{
+    return MAX_AVAIL_SLOTS;
+}
+static inline int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
+{
+    return -ENOSYS;
+}
+static inline void kvm_s390_reset_vcpu(S390CPU *cpu)
+{
+}
+static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit,
+                                         uint64_t *hw_limit)
+{
+    return 0;
+}
+static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
+{
+}
+static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
+{
+    return 0;
+}
+#endif
+
+static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
+{
+    if (kvm_enabled()) {
+        return kvm_s390_set_mem_limit(kvm_state, new_limit, hw_limit);
+    }
+    return 0;
+}
+
+static inline void cmma_reset(S390CPU *cpu)
+{
+    if (kvm_enabled()) {
+        CPUState *cs = CPU(cpu);
+        kvm_s390_clear_cmma_callback(cs->kvm_state);
+    }
+}
+
+static inline int s390_cpu_restart(S390CPU *cpu)
+{
+    if (kvm_enabled()) {
+        return kvm_s390_cpu_restart(cpu);
+    }
+    return -ENOSYS;
+}
+
+static inline int s390_get_memslot_count(KVMState *s)
+{
+    if (kvm_enabled()) {
+        return kvm_s390_get_memslot_count(s);
+    } else {
+        return MAX_AVAIL_SLOTS;
+    }
+}
+
+void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
+                       uint32_t io_int_parm, uint32_t io_int_word);
+void s390_crw_mchk(void);
+
+static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier,
+                                              uint32_t sch_id, int vq,
+                                              bool assign)
+{
+    return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
+}
+
+#ifdef CONFIG_KVM
+static inline bool vregs_needed(void *opaque)
+{
+    if (kvm_enabled()) {
+        return kvm_check_extension(kvm_state, KVM_CAP_S390_VECTOR_REGISTERS);
+    }
+    return 0;
+}
+#else
+static inline bool vregs_needed(void *opaque)
+{
+    return 0;
+}
+#endif
+#endif
diff --git a/qemu/target-s390x/fpu_helper.c b/qemu/target-s390x/fpu_helper.c
new file mode 100644
index 000000000..45b7ddfbe
--- /dev/null
+++ b/qemu/target-s390x/fpu_helper.c
@@ -0,0 +1,743 @@
+/*
+ *  S/390 FPU helper routines
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *  Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+#define RET128(F) (env->retxl = F.low, F.high)
+
+#define convert_bit(mask, from, to) \
+    (to < from                      \
+     ? (mask / (from / to)) & to    \
+     : (mask & from) * (to / from))
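+/* convert_bit() moves the single flag bit selected by "from" to the
+   position selected by "to" using only multiply/divide, so the whole
+   expression stays constant-foldable; e.g. with from == 0x40 and
+   to == 0x08 it reduces to (mask / 8) & 0x08, a right shift by three.  */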
+
+static void ieee_exception(CPUS390XState *env, uint32_t dxc, uintptr_t retaddr)
+{
+    /* Install the DXC code.  */
+    env->fpc = (env->fpc & ~0xff00) | (dxc << 8);
+    /* Trap.  */
+    runtime_exception(env, PGM_DATA, retaddr);
+}
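+/* FPC layout as used here: bits 31-24 hold the IEEE interruption masks,
+   bits 23-16 the flags, bits 15-8 the DXC and the low bits the rounding
+   mode, so an exception that is both flagged and enabled sets a bit in
+   each of the two top bytes before the trap is delivered.  */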
+
+/* Should be called after any operation that may raise IEEE exceptions.  */
+static void handle_exceptions(CPUS390XState *env, uintptr_t retaddr)
+{
+    unsigned s390_exc, qemu_exc;
+
+    /* Get the exceptions raised by the current operation.  Reset the
+       fpu_status contents so that the next operation has a clean slate.  */
+    qemu_exc = env->fpu_status.float_exception_flags;
+    if (qemu_exc == 0) {
+        return;
+    }
+    env->fpu_status.float_exception_flags = 0;
+
+    /* Convert softfloat exception bits to s390 exception bits.  */
+    s390_exc = 0;
+    s390_exc |= convert_bit(qemu_exc, float_flag_invalid, 0x80);
+    s390_exc |= convert_bit(qemu_exc, float_flag_divbyzero, 0x40);
+    s390_exc |= convert_bit(qemu_exc, float_flag_overflow, 0x20);
+    s390_exc |= convert_bit(qemu_exc, float_flag_underflow, 0x10);
+    s390_exc |= convert_bit(qemu_exc, float_flag_inexact, 0x08);
+
+    /* Install the exceptions that we raised.  */
+    env->fpc |= s390_exc << 16;
+
+    /* Send signals for enabled exceptions.  */
+    s390_exc &= env->fpc >> 24;
+    if (s390_exc) {
+        ieee_exception(env, s390_exc, retaddr);
+    }
+}
+
+static inline int float_comp_to_cc(CPUS390XState *env, int float_compare)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+
+    switch (float_compare) {
+    case float_relation_equal:
+        return 0;
+    case float_relation_less:
+        return 1;
+    case float_relation_greater:
+        return 2;
+    case float_relation_unordered:
+        return 3;
+    default:
+        cpu_abort(CPU(cpu), "unknown return value for float compare\n");
+    }
+}
+
+/* condition codes for unary FP ops */
+uint32_t set_cc_nz_f32(float32 v)
+{
+    if (float32_is_any_nan(v)) {
+        return 3;
+    } else if (float32_is_zero(v)) {
+        return 0;
+    } else if (float32_is_neg(v)) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+uint32_t set_cc_nz_f64(float64 v)
+{
+    if (float64_is_any_nan(v)) {
+        return 3;
+    } else if (float64_is_zero(v)) {
+        return 0;
+    } else if (float64_is_neg(v)) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+uint32_t set_cc_nz_f128(float128 v)
+{
+    if (float128_is_any_nan(v)) {
+        return 3;
+    } else if (float128_is_zero(v)) {
+        return 0;
+    } else if (float128_is_neg(v)) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+/* 32-bit FP addition */
+uint64_t HELPER(aeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float32 ret = float32_add(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 64-bit FP addition */
+uint64_t HELPER(adb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float64 ret = float64_add(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 128-bit FP addition */
+uint64_t HELPER(axb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                     uint64_t bh, uint64_t bl)
+{
+    float128 ret = float128_add(make_float128(ah, al),
+                                make_float128(bh, bl),
+                                &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* 32-bit FP subtraction */
+uint64_t HELPER(seb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float32 ret = float32_sub(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 64-bit FP subtraction */
+uint64_t HELPER(sdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float64 ret = float64_sub(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 128-bit FP subtraction */
+uint64_t HELPER(sxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                     uint64_t bh, uint64_t bl)
+{
+    float128 ret = float128_sub(make_float128(ah, al),
+                                make_float128(bh, bl),
+                                &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* 32-bit FP division */
+uint64_t HELPER(deb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float32 ret = float32_div(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 64-bit FP division */
+uint64_t HELPER(ddb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float64 ret = float64_div(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 128-bit FP division */
+uint64_t HELPER(dxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                     uint64_t bh, uint64_t bl)
+{
+    float128 ret = float128_div(make_float128(ah, al),
+                                make_float128(bh, bl),
+                                &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* 32-bit FP multiplication */
+uint64_t HELPER(meeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float32 ret = float32_mul(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 64-bit FP multiplication */
+uint64_t HELPER(mdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float64 ret = float64_mul(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 64/32-bit FP multiplication */
+uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float64 ret = float32_to_float64(f2, &env->fpu_status);
+    ret = float64_mul(f1, ret, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 128-bit FP multiplication */
+uint64_t HELPER(mxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                     uint64_t bh, uint64_t bl)
+{
+    float128 ret = float128_mul(make_float128(ah, al),
+                                make_float128(bh, bl),
+                                &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* 128/64-bit FP multiplication */
+uint64_t HELPER(mxdb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                      uint64_t f2)
+{
+    float128 ret = float64_to_float128(f2, &env->fpu_status);
+    ret = float128_mul(make_float128(ah, al), ret, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* convert 32-bit float to 64-bit float */
+uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2)
+{
+    float64 ret = float32_to_float64(f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return float64_maybe_silence_nan(ret);
+}
+
+/* convert 128-bit float to 64-bit float */
+uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al)
+{
+    float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return float64_maybe_silence_nan(ret);
+}
+
+/* convert 64-bit float to 128-bit float */
+uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2)
+{
+    float128 ret = float64_to_float128(f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(float128_maybe_silence_nan(ret));
+}
+
+/* convert 32-bit float to 128-bit float */
+uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2)
+{
+    float128 ret = float32_to_float128(f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(float128_maybe_silence_nan(ret));
+}
+
+/* convert 64-bit float to 32-bit float */
+uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2)
+{
+    float32 ret = float64_to_float32(f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return float32_maybe_silence_nan(ret);
+}
+
+/* convert 128-bit float to 32-bit float */
+uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al)
+{
+    float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return float32_maybe_silence_nan(ret);
+}
+
+/* 32-bit FP compare */
+uint32_t HELPER(ceb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    int cmp = float32_compare_quiet(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return float_comp_to_cc(env, cmp);
+}
+
+/* 64-bit FP compare */
+uint32_t HELPER(cdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    int cmp = float64_compare_quiet(f1, f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return float_comp_to_cc(env, cmp);
+}
+
+/* 128-bit FP compare */
+uint32_t HELPER(cxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                     uint64_t bh, uint64_t bl)
+{
+    int cmp = float128_compare_quiet(make_float128(ah, al),
+                                     make_float128(bh, bl),
+                                     &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return float_comp_to_cc(env, cmp);
+}
+
+static int swap_round_mode(CPUS390XState *env, int m3)
+{
+    int ret = env->fpu_status.float_rounding_mode;
+    switch (m3) {
+    case 0:
+        /* current mode */
+        break;
+    case 1:
+        /* biased round to nearest */
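+        /* fall through */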
+    case 4:
+        /* round to nearest */
+        set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
+        break;
+    case 5:
+        /* round to zero */
+        set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
+        break;
+    case 6:
+        /* round to +inf */
+        set_float_rounding_mode(float_round_up, &env->fpu_status);
+        break;
+    case 7:
+        /* round to -inf */
+        set_float_rounding_mode(float_round_down, &env->fpu_status);
+        break;
+    }
+    return ret;
+}
+
+/* convert 64-bit int to 32-bit float */
+uint64_t HELPER(cegb)(CPUS390XState *env, int64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float32 ret = int64_to_float32(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit int to 64-bit float */
+uint64_t HELPER(cdgb)(CPUS390XState *env, int64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float64 ret = int64_to_float64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit int to 128-bit float */
+uint64_t HELPER(cxgb)(CPUS390XState *env, int64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float128 ret = int64_to_float128(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* convert 64-bit uint to 32-bit float */
+uint64_t HELPER(celgb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float32 ret = uint64_to_float32(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit uint to 64-bit float */
+uint64_t HELPER(cdlgb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float64 ret = uint64_to_float64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit uint to 128-bit float */
+uint64_t HELPER(cxlgb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float128 ret = uint64_to_float128(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* convert 32-bit float to 64-bit int */
+uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    int64_t ret = float32_to_int64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit float to 64-bit int */
+uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    int64_t ret = float64_to_int64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 128-bit float to 64-bit int */
+uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float128 v2 = make_float128(h, l);
+    int64_t ret = float128_to_int64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 32-bit float to 32-bit int */
+uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    int32_t ret = float32_to_int32(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit float to 32-bit int */
+uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    int32_t ret = float64_to_int32(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 128-bit float to 32-bit int */
+uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float128 v2 = make_float128(h, l);
+    int32_t ret = float128_to_int32(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 32-bit float to 64-bit uint */
+uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    uint64_t ret;
+    v2 = float32_to_float64(v2, &env->fpu_status);
+    ret = float64_to_uint64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit float to 64-bit uint */
+uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    uint64_t ret = float64_to_uint64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 128-bit float to 64-bit uint */
+uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float128 v2 = make_float128(h, l);
+    /* ??? Not 100% correct: softfloat has no float128-to-uint64
+       conversion, so fall back to the signed one.  */
+    uint64_t ret = float128_to_int64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 32-bit float to 32-bit uint */
+uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    uint32_t ret = float32_to_uint32(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 64-bit float to 32-bit uint */
+uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    uint32_t ret = float64_to_uint32(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* convert 128-bit float to 32-bit uint */
+uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float128 v2 = make_float128(h, l);
+    /* Not 100% correct: likewise no unsigned float128 conversion.  */
+    uint32_t ret = float128_to_int64(v2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* round to integer 32-bit */
+uint64_t HELPER(fieb)(CPUS390XState *env, uint64_t f2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float32 ret = float32_round_to_int(f2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* round to integer 64-bit */
+uint64_t HELPER(fidb)(CPUS390XState *env, uint64_t f2, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float64 ret = float64_round_to_int(f2, &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* round to integer 128-bit */
+uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint32_t m3)
+{
+    int hold = swap_round_mode(env, m3);
+    float128 ret = float128_round_to_int(make_float128(ah, al),
+                                         &env->fpu_status);
+    set_float_rounding_mode(hold, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+/* 32-bit FP multiply and add */
+uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1,
+                      uint64_t f2, uint64_t f3)
+{
+    float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 64-bit FP multiply and add */
+uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1,
+                      uint64_t f2, uint64_t f3)
+{
+    float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 32-bit FP multiply and subtract */
+uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1,
+                      uint64_t f2, uint64_t f3)
+{
+    float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c,
+                                 &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* 64-bit FP multiply and subtract */
+uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1,
+                      uint64_t f2, uint64_t f3)
+{
+    float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c,
+                                 &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
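+/* The m2 operand of TEST DATA CLASS is a 12-bit mask with one bit per
+   class/sign pair.  As decoded below, 1 << 11 selects +0, 1 << 10 -0,
+   1 << 9 +normal, 1 << 8 -normal, 1 << 5 +inf, 1 << 4 -inf, 1 << 3 +QNaN,
+   1 << 2 -QNaN, 1 << 1 +SNaN and 1 << 0 -SNaN; bits 7 and 6 (the
+   subnormal classes) are what the FIXMEs below leave unhandled.  */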
+/* test data class 32-bit */
+uint32_t HELPER(tceb)(uint64_t f1, uint64_t m2)
+{
+    float32 v1 = f1;
+    int neg = float32_is_neg(v1);
+    uint32_t cc = 0;
+
+    if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+        (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+        (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+        (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+        cc = 1;
+    } else if (m2 & (1 << (9-neg))) {
+        /* assume normalized number */
+        cc = 1;
+    }
+    /* FIXME: denormalized? */
+    return cc;
+}
+
+/* test data class 64-bit */
+uint32_t HELPER(tcdb)(uint64_t v1, uint64_t m2)
+{
+    int neg = float64_is_neg(v1);
+    uint32_t cc = 0;
+
+    if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+        (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+        (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+        (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+        cc = 1;
+    } else if (m2 & (1 << (9-neg))) {
+        /* assume normalized number */
+        cc = 1;
+    }
+    /* FIXME: denormalized? */
+    return cc;
+}
+
+/* test data class 128-bit */
+uint32_t HELPER(tcxb)(uint64_t ah, uint64_t al, uint64_t m2)
+{
+    float128 v1 = make_float128(ah, al);
+    int neg = float128_is_neg(v1);
+    uint32_t cc = 0;
+
+    if ((float128_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+        (float128_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+        (float128_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+        (float128_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+        cc = 1;
+    } else if (m2 & (1 << (9-neg))) {
+        /* assume normalized number */
+        cc = 1;
+    }
+    /* FIXME: denormalized? */
+    return cc;
+}
+
+/* square root 32-bit */
+uint64_t HELPER(sqeb)(CPUS390XState *env, uint64_t f2)
+{
+    float32 ret = float32_sqrt(f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* square root 64-bit */
+uint64_t HELPER(sqdb)(CPUS390XState *env, uint64_t f2)
+{
+    float64 ret = float64_sqrt(f2, &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return ret;
+}
+
+/* square root 128-bit */
+uint64_t HELPER(sqxb)(CPUS390XState *env, uint64_t ah, uint64_t al)
+{
+    float128 ret = float128_sqrt(make_float128(ah, al), &env->fpu_status);
+    handle_exceptions(env, GETPC());
+    return RET128(ret);
+}
+
+static const int fpc_to_rnd[4] = {
+    float_round_nearest_even,
+    float_round_to_zero,
+    float_round_up,
+    float_round_down
+};
+
+/* set fpc */
+void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc)
+{
+    /* Install everything in the main FPC.  */
+    env->fpc = fpc;
+
+    /* Install the rounding mode in the shadow fpu_status.  */
+    set_float_rounding_mode(fpc_to_rnd[fpc & 3], &env->fpu_status);
+}
+
+/* set fpc and signal */
+void HELPER(sfas)(CPUS390XState *env, uint64_t val)
+{
+    uint32_t signalling = env->fpc;
+    uint32_t source = val;
+    uint32_t s390_exc;
+
+    /* The contents of the source operand are placed in the FPC register;
+       then the flags in the FPC register are set to the logical OR of the
+       signalling flags and the source flags.  */
+    env->fpc = source | (signalling & 0x00ff0000);
+    set_float_rounding_mode(fpc_to_rnd[source & 3], &env->fpu_status);
+
+    /* If any signalling flag is 1 and the corresponding source mask
+       is also 1, a simulated-IEEE-exception trap occurs.  */
+    s390_exc = (signalling >> 16) & (source >> 24);
+    if (s390_exc) {
+        ieee_exception(env, s390_exc | 3, GETPC());
+    }
+}
diff --git a/qemu/target-s390x/gdbstub.c b/qemu/target-s390x/gdbstub.c
new file mode 100644
index 000000000..31f204964
--- /dev/null
+++ b/qemu/target-s390x/gdbstub.c
@@ -0,0 +1,190 @@
+/*
+ * s390x gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "config.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "qemu/bitops.h"
+
+int s390_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    uint64_t val;
+    int cc_op;
+
+    switch (n) {
+    case S390_PSWM_REGNUM:
+        if (tcg_enabled()) {
+            cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
+                            env->cc_vr);
+            val = deposit64(env->psw.mask, 44, 2, cc_op);
+            return gdb_get_regl(mem_buf, val);
+        }
+        return gdb_get_regl(mem_buf, env->psw.mask);
+    case S390_PSWA_REGNUM:
+        return gdb_get_regl(mem_buf, env->psw.addr);
+    case S390_R0_REGNUM ... S390_R15_REGNUM:
+        return gdb_get_regl(mem_buf, env->regs[n - S390_R0_REGNUM]);
+    }
+    return 0;
+}
+
+int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    target_ulong tmpl = ldtul_p(mem_buf);
+
+    switch (n) {
+    case S390_PSWM_REGNUM:
+        env->psw.mask = tmpl;
+        if (tcg_enabled()) {
+            env->cc_op = extract64(tmpl, 44, 2);
+        }
+        break;
+    case S390_PSWA_REGNUM:
+        env->psw.addr = tmpl;
+        break;
+    case S390_R0_REGNUM ... S390_R15_REGNUM:
+        env->regs[n - S390_R0_REGNUM] = tmpl;
+        break;
+    default:
+        return 0;
+    }
+    return 8;
+}
+
+/* the values represent the positions in s390-acr.xml */
+#define S390_A0_REGNUM 0
+#define S390_A15_REGNUM 15
+/* total number of registers in s390-acr.xml */
+#define S390_NUM_AC_REGS 16
+
+static int cpu_read_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+    switch (n) {
+    case S390_A0_REGNUM ... S390_A15_REGNUM:
+        return gdb_get_reg32(mem_buf, env->aregs[n]);
+    default:
+        return 0;
+    }
+}
+
+static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+    switch (n) {
+    case S390_A0_REGNUM ... S390_A15_REGNUM:
+        env->aregs[n] = ldl_p(mem_buf);
+        cpu_synchronize_post_init(ENV_GET_CPU(env));
+        return 4;
+    default:
+        return 0;
+    }
+}
+
+/* the values represent the positions in s390-fpr.xml */
+#define S390_FPC_REGNUM 0
+#define S390_F0_REGNUM 1
+#define S390_F15_REGNUM 16
+/* total number of registers in s390-fpr.xml */
+#define S390_NUM_FP_REGS 17
+
+static int cpu_read_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+    switch (n) {
+    case S390_FPC_REGNUM:
+        return gdb_get_reg32(mem_buf, env->fpc);
+    case S390_F0_REGNUM ... S390_F15_REGNUM:
+        return gdb_get_reg64(mem_buf, get_freg(env, n - S390_F0_REGNUM)->ll);
+    default:
+        return 0;
+    }
+}
+
+static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+    switch (n) {
+    case S390_FPC_REGNUM:
+        env->fpc = ldl_p(mem_buf);
+        return 4;
+    case S390_F0_REGNUM ... S390_F15_REGNUM:
+        get_freg(env, n - S390_F0_REGNUM)->ll = ldtul_p(mem_buf);
+        return 8;
+    default:
+        return 0;
+    }
+}
+
+/* the values represent the positions in s390-vx.xml */
+#define S390_V0L_REGNUM 0
+#define S390_V15L_REGNUM 15
+#define S390_V16_REGNUM 16
+#define S390_V31_REGNUM 31
+/* total number of registers in s390-vx.xml */
+#define S390_NUM_VREGS 32
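+/* Each vector register is stored as two 64-bit halves, vregs[n][0] (high)
+   and vregs[n][1] (low).  V0-V15 share their high halves with the FP
+   registers, so only the low halves are transferred for those here.  */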
+
+static int cpu_read_vreg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+    int ret;
+
+    switch (n) {
+    case S390_V0L_REGNUM ... S390_V15L_REGNUM:
+        ret = gdb_get_reg64(mem_buf, env->vregs[n][1].ll);
+        break;
+    case S390_V16_REGNUM ... S390_V31_REGNUM:
+        ret = gdb_get_reg64(mem_buf, env->vregs[n][0].ll);
+        ret += gdb_get_reg64(mem_buf + 8, env->vregs[n][1].ll);
+        break;
+    default:
+        ret = 0;
+    }
+
+    return ret;
+}
+
+static int cpu_write_vreg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+    switch (n) {
+    case S390_V0L_REGNUM ... S390_V15L_REGNUM:
+        env->vregs[n][1].ll = ldtul_p(mem_buf + 8);
+        return 8;
+    case S390_V16_REGNUM ... S390_V31_REGNUM:
+        env->vregs[n][0].ll = ldtul_p(mem_buf);
+        env->vregs[n][1].ll = ldtul_p(mem_buf + 8);
+        return 16;
+    default:
+        return 0;
+    }
+}
+
+void s390_cpu_gdb_init(CPUState *cs)
+{
+    gdb_register_coprocessor(cs, cpu_read_ac_reg,
+                             cpu_write_ac_reg,
+                             S390_NUM_AC_REGS, "s390-acr.xml", 0);
+
+    gdb_register_coprocessor(cs, cpu_read_fp_reg,
+                             cpu_write_fp_reg,
+                             S390_NUM_FP_REGS, "s390-fpr.xml", 0);
+
+    gdb_register_coprocessor(cs, cpu_read_vreg,
+                             cpu_write_vreg,
+                             S390_NUM_VREGS, "s390-vx.xml", 0);
+}
diff --git a/qemu/target-s390x/helper.c b/qemu/target-s390x/helper.c
new file mode 100644
index 000000000..d88700695
--- /dev/null
+++ b/qemu/target-s390x/helper.c
@@ -0,0 +1,652 @@
+/*
+ *  S/390 helpers
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *  Copyright (c) 2011 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/timer.h"
+#include "exec/cpu_ldst.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/sysemu.h"
+#endif
+
+//#define DEBUG_S390
+//#define DEBUG_S390_STDOUT
+
+#ifdef DEBUG_S390
+#ifdef DEBUG_S390_STDOUT
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
+         qemu_log(fmt, ##__VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
+#endif
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+
+#ifndef CONFIG_USER_ONLY
+void s390x_tod_timer(void *opaque)
+{
+    S390CPU *cpu = opaque;
+    CPUS390XState *env = &cpu->env;
+
+    env->pending_int |= INTERRUPT_TOD;
+    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void s390x_cpu_timer(void *opaque)
+{
+    S390CPU *cpu = opaque;
+    CPUS390XState *env = &cpu->env;
+
+    env->pending_int |= INTERRUPT_CPUTIMER;
+    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+#endif
+
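+/* Note: the cpu_model string is currently ignored; a single generic
+   TYPE_S390_CPU object is instantiated regardless of what was asked for.  */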
+S390CPU *cpu_s390x_init(const char *cpu_model)
+{
+    S390CPU *cpu;
+
+    cpu = S390_CPU(object_new(TYPE_S390_CPU));
+
+    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+    return cpu;
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+void s390_cpu_do_interrupt(CPUState *cs)
+{
+    cs->exception_index = -1;
+}
+
+int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+                              int rw, int mmu_idx)
+{
+    S390CPU *cpu = S390_CPU(cs);
+
+    cs->exception_index = EXCP_PGM;
+    cpu->env.int_pgm_code = PGM_ADDRESSING;
+    /* On real machines this value is dropped into LowMem.  Since this
+       is userland, simply put this someplace that cpu_loop can find it.  */
+    cpu->env.__excp_addr = address;
+    return 1;
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+/* Be sure to exit the TB after this call!  */
+void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+
+    cs->exception_index = EXCP_PGM;
+    env->int_pgm_code = code;
+    env->int_pgm_ilen = ilen;
+}
+
+int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
+                              int rw, int mmu_idx)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
+    target_ulong vaddr, raddr;
+    int prot;
+
+    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
+            __func__, orig_vaddr, rw, mmu_idx);
+
+    orig_vaddr &= TARGET_PAGE_MASK;
+    vaddr = orig_vaddr;
+
+    /* 31-Bit mode */
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        vaddr &= 0x7fffffff;
+    }
+
+    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
+        /* Translation ended in exception */
+        return 1;
+    }
+
+    /* check out of RAM access */
+    if (raddr > (ram_size + virtio_size)) {
+        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
+                (uint64_t)raddr, (uint64_t)ram_size);
+        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
+        return 1;
+    }
+
+    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
+            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
+
+    tlb_set_page(cs, orig_vaddr, raddr, prot,
+                 mmu_idx, TARGET_PAGE_SIZE);
+
+    return 0;
+}
+
+hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    target_ulong raddr;
+    int prot;
+    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+
+    /* 31-Bit mode */
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        vaddr &= 0x7fffffff;
+    }
+
+    mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false);
+
+    return raddr;
+}
+
+hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
+{
+    hwaddr phys_addr;
+    target_ulong page;
+
+    page = vaddr & TARGET_PAGE_MASK;
+    phys_addr = cpu_get_phys_page_debug(cs, page);
+    phys_addr += (vaddr & ~TARGET_PAGE_MASK);
+
+    return phys_addr;
+}
+
+void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+    uint64_t old_mask = env->psw.mask;
+
+    env->psw.addr = addr;
+    env->psw.mask = mask;
+    if (tcg_enabled()) {
+        env->cc_op = (mask >> 44) & 3;
+    }
+
+    if ((old_mask ^ mask) & PSW_MASK_PER) {
+        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
+    }
+
+    if (mask & PSW_MASK_WAIT) {
+        S390CPU *cpu = s390_env_get_cpu(env);
+        if (s390_cpu_halt(cpu) == 0) {
+#ifndef CONFIG_USER_ONLY
+            qemu_system_shutdown_request();
+#endif
+        }
+    }
+}
+
+static uint64_t get_psw_mask(CPUS390XState *env)
+{
+    uint64_t r = env->psw.mask;
+
+    if (tcg_enabled()) {
+        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
+                             env->cc_vr);
+
+        r &= ~PSW_MASK_CC;
+        assert(!(env->cc_op & ~3));
+        r |= (uint64_t)env->cc_op << 44;
+    }
+
+    return r;
+}
+
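+/* Map the 8K lowcore (prefix area) at the guest address held in env->psa;
+   interruption parameters and old/new PSWs are exchanged through it.  */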
+static LowCore *cpu_map_lowcore(CPUS390XState *env)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    LowCore *lowcore;
+    hwaddr len = sizeof(LowCore);
+
+    lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+
+    if (len < sizeof(LowCore)) {
+        cpu_abort(CPU(cpu), "Could not map lowcore\n");
+    }
+
+    return lowcore;
+}
+
+static void cpu_unmap_lowcore(LowCore *lowcore)
+{
+    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
+}
+
+void do_restart_interrupt(CPUS390XState *env)
+{
+    uint64_t mask, addr;
+    LowCore *lowcore;
+
+    lowcore = cpu_map_lowcore(env);
+
+    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
+    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
+    addr = be64_to_cpu(lowcore->restart_new_psw.addr);
+
+    cpu_unmap_lowcore(lowcore);
+
+    load_psw(env, mask, addr);
+}
+
+static void do_program_interrupt(CPUS390XState *env)
+{
+    uint64_t mask, addr;
+    LowCore *lowcore;
+    int ilen = env->int_pgm_ilen;
+
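+    /* ILEN_LATER means the instruction length was not known when the
+       exception was raised; derive it from the opcode at the PSW address.
+       ILEN_LATER_INC additionally advances the PSW past the instruction,
+       for exceptions where the operation is considered completed.  */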
+    switch (ilen) {
+    case ILEN_LATER:
+        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
+        break;
+    case ILEN_LATER_INC:
+        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
+        env->psw.addr += ilen;
+        break;
+    default:
+        assert(ilen == 2 || ilen == 4 || ilen == 6);
+    }
+
+    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
+                  __func__, env->int_pgm_code, ilen);
+
+    lowcore = cpu_map_lowcore(env);
+
+    /* Signal PER events with the exception.  */
+    if (env->per_perc_atmid) {
+        env->int_pgm_code |= PGM_PER;
+        lowcore->per_address = cpu_to_be64(env->per_address);
+        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
+        env->per_perc_atmid = 0;
+    }
+
+    lowcore->pgm_ilen = cpu_to_be16(ilen);
+    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
+    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
+    mask = be64_to_cpu(lowcore->program_new_psw.mask);
+    addr = be64_to_cpu(lowcore->program_new_psw.addr);
+    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);
+
+    cpu_unmap_lowcore(lowcore);
+
+    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
+            env->int_pgm_code, ilen, env->psw.mask,
+            env->psw.addr);
+
+    load_psw(env, mask, addr);
+}
+
+static void do_svc_interrupt(CPUS390XState *env)
+{
+    uint64_t mask, addr;
+    LowCore *lowcore;
+
+    lowcore = cpu_map_lowcore(env);
+
+    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
+    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
+    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
+    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
+    addr = be64_to_cpu(lowcore->svc_new_psw.addr);
+
+    cpu_unmap_lowcore(lowcore);
+
+    load_psw(env, mask, addr);
+
+    /* When a PER event is pending, the PER exception has to happen
+       immediately after the SERVICE CALL one.  */
+    if (env->per_perc_atmid) {
+        env->int_pgm_code = PGM_PER;
+        env->int_pgm_ilen = env->int_svc_ilen;
+        do_program_interrupt(env);
+    }
+}
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+static void do_ext_interrupt(CPUS390XState *env)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    uint64_t mask, addr;
+    LowCore *lowcore;
+    ExtQueue *q;
+
+    if (!(env->psw.mask & PSW_MASK_EXT)) {
+        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
+    }
+
+    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
+        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
+    }
+
+    q = &env->ext_queue[env->ext_index];
+    lowcore = cpu_map_lowcore(env);
+
+    lowcore->ext_int_code = cpu_to_be16(q->code);
+    lowcore->ext_params = cpu_to_be32(q->param);
+    lowcore->ext_params2 = cpu_to_be64(q->param64);
+    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
+    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
+    mask = be64_to_cpu(lowcore->external_new_psw.mask);
+    addr = be64_to_cpu(lowcore->external_new_psw.addr);
+
+    cpu_unmap_lowcore(lowcore);
+
+    env->ext_index--;
+    if (env->ext_index == -1) {
+        env->pending_int &= ~INTERRUPT_EXT;
+    }
+
+    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+            env->psw.mask, env->psw.addr);
+
+    load_psw(env, mask, addr);
+}
+
+static void do_io_interrupt(CPUS390XState *env)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    LowCore *lowcore;
+    IOIntQueue *q;
+    uint8_t isc;
+    int disable = 1;
+    int found = 0;
+
+    if (!(env->psw.mask & PSW_MASK_IO)) {
+        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
+    }
+
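+    /* Scan the interruption subclasses in ascending (priority) order and
+       deliver the first pending interrupt whose ISC is enabled in CR6.
+       'disable' stays set only if no pending I/O interrupts remain.  */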
+    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
+        uint64_t isc_bits;
+
+        if (env->io_index[isc] < 0) {
+            continue;
+        }
+        if (env->io_index[isc] >= MAX_IO_QUEUE) {
+            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
+                      isc, env->io_index[isc]);
+        }
+
+        q = &env->io_queue[env->io_index[isc]][isc];
+        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
+        if (!(env->cregs[6] & isc_bits)) {
+            disable = 0;
+            continue;
+        }
+        if (!found) {
+            uint64_t mask, addr;
+
+            found = 1;
+            lowcore = cpu_map_lowcore(env);
+
+            lowcore->subchannel_id = cpu_to_be16(q->id);
+            lowcore->subchannel_nr = cpu_to_be16(q->nr);
+            lowcore->io_int_parm = cpu_to_be32(q->parm);
+            lowcore->io_int_word = cpu_to_be32(q->word);
+            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
+            mask = be64_to_cpu(lowcore->io_new_psw.mask);
+            addr = be64_to_cpu(lowcore->io_new_psw.addr);
+
+            cpu_unmap_lowcore(lowcore);
+
+            env->io_index[isc]--;
+
+            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+                    env->psw.mask, env->psw.addr);
+            load_psw(env, mask, addr);
+        }
+        if (env->io_index[isc] >= 0) {
+            disable = 0;
+        }
+    }
+
+    if (disable) {
+        env->pending_int &= ~INTERRUPT_IO;
+    }
+}
+
+static void do_mchk_interrupt(CPUS390XState *env)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    uint64_t mask, addr;
+    LowCore *lowcore;
+    MchkQueue *q;
+    int i;
+
+    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
+        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
+    }
+
+    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
+        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
+    }
+
+    q = &env->mchk_queue[env->mchk_index];
+
+    if (q->type != 1) {
+        /* Don't know how to handle this... */
+        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
+    }
+    if (!(env->cregs[14] & (1 << 28))) {
+        /* CRW machine checks disabled */
+        return;
+    }
+
+    lowcore = cpu_map_lowcore(env);
+
+    for (i = 0; i < 16; i++) {
+        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
+        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
+        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
+        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
+    }
+    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
+    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
+    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
+    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
+    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
+    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
+    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);
+
+    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
+    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
+    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
+    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
+    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);
+
+    cpu_unmap_lowcore(lowcore);
+
+    env->mchk_index--;
+    if (env->mchk_index == -1) {
+        env->pending_int &= ~INTERRUPT_MCHK;
+    }
+
+    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+            env->psw.mask, env->psw.addr);
+
+    load_psw(env, mask, addr);
+}
+
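+/* Deliver one interrupt.  Program and SVC exceptions arrive with
+   cs->exception_index already set; pending machine-check, external and
+   I/O interrupts are folded in here in that order, each gated by the
+   corresponding PSW mask bit.  */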
+void s390_cpu_do_interrupt(CPUState *cs)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+
+    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
+                  __func__, cs->exception_index, env->psw.addr);
+
+    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+    /* handle machine checks */
+    if ((env->psw.mask & PSW_MASK_MCHECK) &&
+        (cs->exception_index == -1)) {
+        if (env->pending_int & INTERRUPT_MCHK) {
+            cs->exception_index = EXCP_MCHK;
+        }
+    }
+    /* handle external interrupts */
+    if ((env->psw.mask & PSW_MASK_EXT) &&
+        cs->exception_index == -1) {
+        if (env->pending_int & INTERRUPT_EXT) {
+            /* code is already in env */
+            cs->exception_index = EXCP_EXT;
+        } else if (env->pending_int & INTERRUPT_TOD) {
+            cpu_inject_ext(cpu, 0x1004, 0, 0);
+            cs->exception_index = EXCP_EXT;
+            env->pending_int &= ~INTERRUPT_EXT;
+            env->pending_int &= ~INTERRUPT_TOD;
+        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
+            cpu_inject_ext(cpu, 0x1005, 0, 0);
+            cs->exception_index = EXCP_EXT;
+            env->pending_int &= ~INTERRUPT_EXT;
+            env->pending_int &= ~INTERRUPT_CPUTIMER;
+        }
+    }
+    /* handle I/O interrupts */
+    if ((env->psw.mask & PSW_MASK_IO) &&
+        (cs->exception_index == -1)) {
+        if (env->pending_int & INTERRUPT_IO) {
+            cs->exception_index = EXCP_IO;
+        }
+    }
+
+    switch (cs->exception_index) {
+    case EXCP_PGM:
+        do_program_interrupt(env);
+        break;
+    case EXCP_SVC:
+        do_svc_interrupt(env);
+        break;
+    case EXCP_EXT:
+        do_ext_interrupt(env);
+        break;
+    case EXCP_IO:
+        do_io_interrupt(env);
+        break;
+    case EXCP_MCHK:
+        do_mchk_interrupt(env);
+        break;
+    }
+    cs->exception_index = -1;
+
+    if (!env->pending_int) {
+        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+    }
+}
+
+bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        S390CPU *cpu = S390_CPU(cs);
+        CPUS390XState *env = &cpu->env;
+
+        if (env->psw.mask & PSW_MASK_EXT) {
+            s390_cpu_do_interrupt(cs);
+            return true;
+        }
+    }
+    return false;
+}
+
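+/* PER storage-alteration events are modelled with QEMU watchpoints
+   covering the CR10..CR11 address range; the range wraps around when
+   CR10 > CR11.  */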
+void s390_cpu_recompute_watchpoints(CPUState *cs)
+{
+    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+
+    /* We are called when the watchpoints have changed. First
+       remove them all.  */
+    cpu_watchpoint_remove_all(cs, BP_CPU);
+
+    /* Return if PER is not enabled */
+    if (!(env->psw.mask & PSW_MASK_PER)) {
+        return;
+    }
+
+    /* Return if storage-alteration event is not enabled.  */
+    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
+        return;
+    }
+
+    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
+        /* We can't create a watchpoint spanning the whole memory range, so
+           split it into two parts.  */
+        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
+        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
+    } else if (env->cregs[10] > env->cregs[11]) {
+        /* The address range loops, create two watchpoints.  */
+        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
+                              wp_flags, NULL);
+        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
+
+    } else {
+        /* Default case, create a single watchpoint.  */
+        cpu_watchpoint_insert(cs, env->cregs[10],
+                              env->cregs[11] - env->cregs[10] + 1,
+                              wp_flags, NULL);
+    }
+}
+
+void s390x_cpu_debug_excp_handler(CPUState *cs)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+    if (wp_hit && wp_hit->flags & BP_CPU) {
+        /* FIXME: When the storage-alteration-space control bit is set,
+           the exception should only be triggered if the memory access
+           is done using an address space with the storage-alteration-event
+           bit set.  We have no way to detect that with the current
+           watchpoint code.  */
+        cs->watchpoint_hit = NULL;
+
+        env->per_address = env->psw.addr;
+        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
+        /* FIXME: We currently have no way to detect the address space used
+           to trigger the watchpoint.  For now, just assume it is the
+           current default ASC; this holds true except when the MVCP and
+           MVCS instructions are used.  */
+        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;
+
+        /* Remove all watchpoints to re-execute the code.  A PER exception
+           will be triggered, it will call load_psw which will recompute
+           the watchpoints.  */
+        cpu_watchpoint_remove_all(cs, BP_CPU);
+        cpu_resume_from_signal(cs, NULL);
+    }
+}
+#endif /* CONFIG_USER_ONLY */
diff --git a/qemu/target-s390x/helper.h b/qemu/target-s390x/helper.h
new file mode 100644
index 000000000..7e06119e9
--- /dev/null
+++ b/qemu/target-s390x/helper.h
@@ -0,0 +1,133 @@
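+/* TCG helper declarations.  The DEF_HELPER_FLAGS_n variants carry call
+   flags: TCG_CALL_NO_WG marks a helper that does not write TCG globals,
+   TCG_CALL_NO_RWG one that neither reads nor writes them, and an _SE
+   suffix marks a helper without side effects, whose call may be dropped
+   if the result is unused.  */
+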
+DEF_HELPER_2(exception, noreturn, env, i32)
+DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_3(mvcl, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(clm, TCG_CALL_NO_WG, i32, env, i32, i32, i64)
+DEF_HELPER_FLAGS_3(divs32, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_3(divu32, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divs64, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_4(divu64, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_4(srst, i64, env, i64, i64, i64)
+DEF_HELPER_4(clst, i64, env, i64, i64, i64)
+DEF_HELPER_4(mvpg, void, env, i64, i64, i64)
+DEF_HELPER_4(mvst, i64, env, i64, i64, i64)
+DEF_HELPER_5(ex, i32, env, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_4(stam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(lam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_4(mvcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(clcle, i32, env, i32, i64, i32)
+DEF_HELPER_3(cegb, i64, env, s64, i32)
+DEF_HELPER_3(cdgb, i64, env, s64, i32)
+DEF_HELPER_3(cxgb, i64, env, s64, i32)
+DEF_HELPER_3(celgb, i64, env, i64, i32)
+DEF_HELPER_3(cdlgb, i64, env, i64, i32)
+DEF_HELPER_3(cxlgb, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(seb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(sdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(sxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(deb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(ddb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(dxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(meeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mdeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(mxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mxdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_2(ldeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(ldxb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(lxdb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(lxeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(ledb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(lexb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(ceb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(cdb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(cgeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(cgdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(cgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(cfeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(cfdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(cfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(clgeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(clgdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(clgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(clfeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(clfdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(clfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(fieb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(fidb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(fixb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(maeb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(madb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mseb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(msdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_2(tceb, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(tcxb, TCG_CALL_NO_RWG_SE, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32)
+DEF_HELPER_FLAGS_4(unpk, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(tr, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_4(tre, i64, env, i64, i64, i64)
+DEF_HELPER_4(trt, i32, env, i32, i64, i64)
+DEF_HELPER_4(cksm, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
+
+#ifndef CONFIG_USER_ONLY
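+/* The helpers below touch privileged state (control registers, timers,
+   storage keys, I/O subchannels) and are only built for system-mode
+   emulation.  */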
+DEF_HELPER_3(servc, i32, env, i64, i64)
+DEF_HELPER_4(diag, void, env, i32, i32, i32)
+DEF_HELPER_3(load_psw, noreturn, env, i64, i64)
+DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env)
+DEF_HELPER_FLAGS_2(sckc, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(stckc, TCG_CALL_NO_RWG, i64, env)
+DEF_HELPER_FLAGS_2(spt, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env)
+DEF_HELPER_4(stsi, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_2(tprot, TCG_CALL_NO_RWG, i32, i64, i64)
+DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64)
+DEF_HELPER_3(csp, i32, env, i32, i64)
+DEF_HELPER_4(mvcs, i32, env, i64, i64, i64)
+DEF_HELPER_4(mvcp, i32, env, i64, i64, i64)
+DEF_HELPER_4(sigp, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_3(ipte, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_2(lra, i64, env, i64)
+DEF_HELPER_FLAGS_2(lura, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(lurag, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(stura, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(sturg, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_1(per_check_exception, void, env)
+DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64)
+
+DEF_HELPER_2(xsch, void, env, i64)
+DEF_HELPER_2(csch, void, env, i64)
+DEF_HELPER_2(hsch, void, env, i64)
+DEF_HELPER_3(msch, void, env, i64, i64)
+DEF_HELPER_2(rchp, void, env, i64)
+DEF_HELPER_2(rsch, void, env, i64)
+DEF_HELPER_3(ssch, void, env, i64, i64)
+DEF_HELPER_3(stsch, void, env, i64, i64)
+DEF_HELPER_3(tsch, void, env, i64, i64)
+DEF_HELPER_2(chsc, void, env, i64)
+#endif
diff --git a/qemu/target-s390x/insn-data.def b/qemu/target-s390x/insn-data.def
new file mode 100644
index 000000000..075ff597c
--- /dev/null
+++ b/qemu/target-s390x/insn-data.def
@@ -0,0 +1,931 @@
+/*
+ *  Arguments to the opcode prototypes
+ *
+ *  C(OPC,    NAME,    FMT,   FAC, I1, I2, P, W, OP, CC)
+ *  D(OPC,    NAME,    FMT,   FAC, I1, I2, P, W, OP, CC, DATA)
+ *
+ *  OPC  = (op << 8) | op2 where op is the major, op2 the minor opcode
+ *  NAME = name of the opcode, used internally
+ *  FMT  = format of the opcode (defined in insn-format.def)
+ *  FAC  = facility the opcode is available in (defined in DisasFacility)
+ *  I1   = func in1_xx fills o->in1
+ *  I2   = func in2_xx fills o->in2
+ *  P    = func prep_xx initializes o->*out*
+ *  W    = func wout_xx writes o->*out* somewhere
+ *  OP   = func op_xx does the bulk of the operation
+ *  CC   = func cout_xx defines how cc should get set
+ *  DATA = immediate argument to op_xx function
+ *
+ *  The helpers get called in order: I1, I2, P, OP, W, CC
+ */
+
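+/* As a worked example, the AR entry below:
+ *   C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
+ * decodes opcode 0x1a (format RR_a, base facility Z): in1_r1 and in2_r2
+ * load both register operands, prep_new allocates a fresh temporary for
+ * the output, op_add performs the addition, wout_r1_32 stores the low
+ * 32 bits back into r1, and cout_adds32 sets the condition code for a
+ * signed 32-bit add.
+ */
+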
+/* ADD */
+    C(0x1a00, AR,      RR_a,  Z,   r1, r2, new, r1_32, add, adds32)
+    C(0xb9f8, ARK,     RRF_a, DO,  r2, r3, new, r1_32, add, adds32)
+    C(0x5a00, A,       RX_a,  Z,   r1, m2_32s, new, r1_32, add, adds32)
+    C(0xe35a, AY,      RXY_a, LD,  r1, m2_32s, new, r1_32, add, adds32)
+    C(0xb908, AGR,     RRE,   Z,   r1, r2, r1, 0, add, adds64)
+    C(0xb918, AGFR,    RRE,   Z,   r1, r2_32s, r1, 0, add, adds64)
+    C(0xb9e8, AGRK,    RRF_a, DO,  r2, r3, r1, 0, add, adds64)
+    C(0xe308, AG,      RXY_a, Z,   r1, m2_64, r1, 0, add, adds64)
+    C(0xe318, AGF,     RXY_a, Z,   r1, m2_32s, r1, 0, add, adds64)
+    C(0xb30a, AEBR,    RRE,   Z,   e1, e2, new, e1, aeb, f32)
+    C(0xb31a, ADBR,    RRE,   Z,   f1_o, f2_o, f1, 0, adb, f64)
+    C(0xb34a, AXBR,    RRE,   Z,   0, x2_o, x1, 0, axb, f128)
+    C(0xed0a, AEB,     RXE,   Z,   e1, m2_32u, new, e1, aeb, f32)
+    C(0xed1a, ADB,     RXE,   Z,   f1_o, m2_64, f1, 0, adb, f64)
+/* ADD HIGH */
+    C(0xb9c8, AHHHR,   RRF_a, HW,  r2_sr32, r3_sr32, new, r1_32h, add, adds32)
+    C(0xb9d8, AHHLR,   RRF_a, HW,  r2_sr32, r3, new, r1_32h, add, adds32)
+/* ADD IMMEDIATE */
+    C(0xc209, AFI,     RIL_a, EI,  r1, i2, new, r1_32, add, adds32)
+    C(0xeb6a, ASI,     SIY,   GIE, m1_32s, i2, new, m1_32, add, adds32)
+    C(0xecd8, AHIK,    RIE_d, DO,  r3, i2, new, r1_32, add, adds32)
+    C(0xc208, AGFI,    RIL_a, EI,  r1, i2, r1, 0, add, adds64)
+    C(0xeb7a, AGSI,    SIY,   GIE, m1_64, i2, new, m1_64, add, adds64)
+    C(0xecd9, AGHIK,   RIE_d, DO,  r3, i2, r1, 0, add, adds64)
+/* ADD IMMEDIATE HIGH */
+    C(0xcc08, AIH,     RIL_a, HW,  r1_sr32, i2, new, r1_32h, add, adds32)
+/* ADD HALFWORD */
+    C(0x4a00, AH,      RX_a,  Z,   r1, m2_16s, new, r1_32, add, adds32)
+    C(0xe37a, AHY,     RXY_a, LD,  r1, m2_16s, new, r1_32, add, adds32)
+/* ADD HALFWORD IMMEDIATE */
+    C(0xa70a, AHI,     RI_a,  Z,   r1, i2, new, r1_32, add, adds32)
+    C(0xa70b, AGHI,    RI_a,  Z,   r1, i2, r1, 0, add, adds64)
+
+/* ADD LOGICAL */
+    C(0x1e00, ALR,     RR_a,  Z,   r1, r2, new, r1_32, add, addu32)
+    C(0xb9fa, ALRK,    RRF_a, DO,  r2, r3, new, r1_32, add, addu32)
+    C(0x5e00, AL,      RX_a,  Z,   r1, m2_32u, new, r1_32, add, addu32)
+    C(0xe35e, ALY,     RXY_a, LD,  r1, m2_32u, new, r1_32, add, addu32)
+    C(0xb90a, ALGR,    RRE,   Z,   r1, r2, r1, 0, add, addu64)
+    C(0xb91a, ALGFR,   RRE,   Z,   r1, r2_32u, r1, 0, add, addu64)
+    C(0xb9ea, ALGRK,   RRF_a, DO,  r2, r3, r1, 0, add, addu64)
+    C(0xe30a, ALG,     RXY_a, Z,   r1, m2_64, r1, 0, add, addu64)
+    C(0xe31a, ALGF,    RXY_a, Z,   r1, m2_32u, r1, 0, add, addu64)
+/* ADD LOGICAL HIGH */
+    C(0xb9ca, ALHHHR,  RRF_a, HW,  r2_sr32, r3_sr32, new, r1_32h, add, addu32)
+    C(0xb9da, ALHHLR,  RRF_a, HW,  r2_sr32, r3, new, r1_32h, add, addu32)
+/* ADD LOGICAL IMMEDIATE */
+    C(0xc20b, ALFI,    RIL_a, EI,  r1, i2_32u, new, r1_32, add, addu32)
+    C(0xc20a, ALGFI,   RIL_a, EI,  r1, i2_32u, r1, 0, add, addu64)
+/* ADD LOGICAL WITH SIGNED IMMEDIATE */
+    C(0xeb6e, ALSI,    SIY,   GIE, m1_32u, i2, new, m1_32, add, addu32)
+    C(0xecda, ALHSIK,  RIE_d, DO,  r3, i2, new, r1_32, add, addu32)
+    C(0xeb7e, ALGSI,   SIY,   GIE, m1_64, i2, new, m1_64, add, addu64)
+    C(0xecdb, ALGHSIK, RIE_d, DO,  r3, i2, r1, 0, add, addu64)
+/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
+    C(0xcc0a, ALSIH,   RIL_a, HW,  r1_sr32, i2, new, r1_32h, add, addu32)
+    C(0xcc0b, ALSIHN,  RIL_a, HW,  r1_sr32, i2, new, r1_32h, add, 0)
+/* ADD LOGICAL WITH CARRY */
+    C(0xb998, ALCR,    RRE,   Z,   r1, r2, new, r1_32, addc, addc32)
+    C(0xb988, ALCGR,   RRE,   Z,   r1, r2, r1, 0, addc, addc64)
+    C(0xe398, ALC,     RXY_a, Z,   r1, m2_32u, new, r1_32, addc, addc32)
+    C(0xe388, ALCG,    RXY_a, Z,   r1, m2_64, r1, 0, addc, addc64)
+
+/* AND */
+    C(0x1400, NR,      RR_a,  Z,   r1, r2, new, r1_32, and, nz32)
+    C(0xb9f4, NRK,     RRF_a, DO,  r2, r3, new, r1_32, and, nz32)
+    C(0x5400, N,       RX_a,  Z,   r1, m2_32s, new, r1_32, and, nz32)
+    C(0xe354, NY,      RXY_a, LD,  r1, m2_32s, new, r1_32, and, nz32)
+    C(0xb980, NGR,     RRE,   Z,   r1, r2, r1, 0, and, nz64)
+    C(0xb9e4, NGRK,    RRF_a, DO,  r2, r3, r1, 0, and, nz64)
+    C(0xe380, NG,      RXY_a, Z,   r1, m2_64, r1, 0, and, nz64)
+    C(0xd400, NC,      SS_a,  Z,   la1, a2, 0, 0, nc, 0)
+/* AND IMMEDIATE */
+    D(0xc00a, NIHF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, andi, 0, 0x2020)
+    D(0xc00b, NILF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, andi, 0, 0x2000)
+    D(0xa504, NIHH,    RI_a,  Z,   r1_o, i2_16u, r1, 0, andi, 0, 0x1030)
+    D(0xa505, NIHL,    RI_a,  Z,   r1_o, i2_16u, r1, 0, andi, 0, 0x1020)
+    D(0xa506, NILH,    RI_a,  Z,   r1_o, i2_16u, r1, 0, andi, 0, 0x1010)
+    D(0xa507, NILL,    RI_a,  Z,   r1_o, i2_16u, r1, 0, andi, 0, 0x1000)
+    C(0x9400, NI,      SI,    Z,   m1_8u, i2_8u, new, m1_8, and, nz64)
+    C(0xeb54, NIY,     SIY,   LD,  m1_8u, i2_8u, new, m1_8, and, nz64)
+
+/* BRANCH AND SAVE */
+    C(0x0d00, BASR,    RR_a,  Z,   0, r2_nz, r1, 0, bas, 0)
+    C(0x4d00, BAS,     RX_a,  Z,   0, a2, r1, 0, bas, 0)
+/* BRANCH RELATIVE AND SAVE */
+    C(0xa705, BRAS,    RI_b,  Z,   0, 0, r1, 0, basi, 0)
+    C(0xc005, BRASL,   RIL_b, Z,   0, 0, r1, 0, basi, 0)
+/* BRANCH ON CONDITION */
+    C(0x0700, BCR,     RR_b,  Z,   0, r2_nz, 0, 0, bc, 0)
+    C(0x4700, BC,      RX_b,  Z,   0, a2, 0, 0, bc, 0)
+/* BRANCH RELATIVE ON CONDITION */
+    C(0xa704, BRC,     RI_c,  Z,   0, 0, 0, 0, bc, 0)
+    C(0xc004, BRCL,    RIL_c, Z,   0, 0, 0, 0, bc, 0)
+/* BRANCH ON COUNT */
+    C(0x0600, BCTR,    RR_a,  Z,   0, r2_nz, 0, 0, bct32, 0)
+    C(0xb946, BCTGR,   RRE,   Z,   0, r2_nz, 0, 0, bct64, 0)
+    C(0x4600, BCT,     RX_a,  Z,   0, a2, 0, 0, bct32, 0)
+    C(0xe346, BCTG,    RXY_a, Z,   0, a2, 0, 0, bct64, 0)
+/* BRANCH RELATIVE ON COUNT */
+    C(0xa706, BRCT,    RI_b,  Z,   0, 0, 0, 0, bct32, 0)
+    C(0xa707, BRCTG,   RI_b,  Z,   0, 0, 0, 0, bct64, 0)
+/* BRANCH RELATIVE ON COUNT HIGH */
+    C(0xcc06, BRCTH,   RIL_b, HW,  0, 0, 0, 0, bcth, 0)
+/* BRANCH ON INDEX */
+    D(0x8600, BXH,     RS_a,  Z,   0, a2, 0, 0, bx32, 0, 0)
+    D(0x8700, BXLE,    RS_a,  Z,   0, a2, 0, 0, bx32, 0, 1)
+    D(0xeb44, BXHG,    RSY_a, Z,   0, a2, 0, 0, bx64, 0, 0)
+    D(0xeb45, BXLEG,   RSY_a, Z,   0, a2, 0, 0, bx64, 0, 1)
+/* BRANCH RELATIVE ON INDEX */
+    D(0x8400, BRXH,    RSI,   Z,   0, 0, 0, 0, bx32, 0, 0)
+    D(0x8500, BRXLE,   RSI,   Z,   0, 0, 0, 0, bx32, 0, 1)
+    D(0xec44, BRXHG,   RIE_e, Z,   0, 0, 0, 0, bx64, 0, 0)
+    D(0xec45, BRXHLE,  RIE_e, Z,   0, 0, 0, 0, bx64, 0, 1)
+
+/* CHECKSUM */
+    C(0xb241, CKSM,    RRE,   Z,   r1_o, ra2, new, r1_32, cksm, 0)
+
+/* COPY SIGN */
+    C(0xb372, CPSDR,   RRF_b, FPSSH, f3_o, f2_o, f1, 0, cps, 0)
+
+/* COMPARE */
+    C(0x1900, CR,      RR_a,  Z,   r1_o, r2_o, 0, 0, 0, cmps32)
+    C(0x5900, C,       RX_a,  Z,   r1_o, m2_32s, 0, 0, 0, cmps32)
+    C(0xe359, CY,      RXY_a, LD,  r1_o, m2_32s, 0, 0, 0, cmps32)
+    C(0xb920, CGR,     RRE,   Z,   r1_o, r2_o, 0, 0, 0, cmps64)
+    C(0xb930, CGFR,    RRE,   Z,   r1_o, r2_32s, 0, 0, 0, cmps64)
+    C(0xe320, CG,      RXY_a, Z,   r1_o, m2_64, 0, 0, 0, cmps64)
+    C(0xe330, CGF,     RXY_a, Z,   r1_o, m2_32s, 0, 0, 0, cmps64)
+    C(0xb309, CEBR,    RRE,   Z,   e1, e2, 0, 0, ceb, 0)
+    C(0xb319, CDBR,    RRE,   Z,   f1_o, f2_o, 0, 0, cdb, 0)
+    C(0xb349, CXBR,    RRE,   Z,   x1_o, x2_o, 0, 0, cxb, 0)
+    C(0xed09, CEB,     RXE,   Z,   e1, m2_32u, 0, 0, ceb, 0)
+    C(0xed19, CDB,     RXE,   Z,   f1_o, m2_64, 0, 0, cdb, 0)
+/* COMPARE IMMEDIATE */
+    C(0xc20d, CFI,     RIL_a, EI,  r1, i2, 0, 0, 0, cmps32)
+    C(0xc20c, CGFI,    RIL_a, EI,  r1, i2, 0, 0, 0, cmps64)
+/* COMPARE RELATIVE LONG */
+    C(0xc60d, CRL,     RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps32)
+    C(0xc608, CGRL,    RIL_b, GIE, r1, mri2_64, 0, 0, 0, cmps64)
+    C(0xc60c, CGFRL,   RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD */
+    C(0x4900, CH,      RX_a,  Z,   r1_o, m2_16s, 0, 0, 0, cmps32)
+    C(0xe379, CHY,     RXY_a, LD,  r1_o, m2_16s, 0, 0, 0, cmps32)
+    C(0xe334, CGH,     RXY_a, GIE, r1_o, m2_16s, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD IMMEDIATE */
+    C(0xa70e, CHI,     RI_a,  Z,   r1_o, i2, 0, 0, 0, cmps32)
+    C(0xa70f, CGHI,    RI_a,  Z,   r1_o, i2, 0, 0, 0, cmps64)
+    C(0xe554, CHHSI,   SIL,   GIE, m1_16s, i2, 0, 0, 0, cmps64)
+    C(0xe55c, CHSI,    SIL,   GIE, m1_32s, i2, 0, 0, 0, cmps64)
+    C(0xe558, CGHSI,   SIL,   GIE, m1_64, i2, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD RELATIVE LONG */
+    C(0xc605, CHRL,    RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32)
+    C(0xc604, CGHRL,   RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64)
+/* COMPARE HIGH */
+    C(0xb9cd, CHHR,    RRE,   HW,  r1_sr32, r2_sr32, 0, 0, 0, cmps32)
+    C(0xb9dd, CHLR,    RRE,   HW,  r1_sr32, r2_o, 0, 0, 0, cmps32)
+    C(0xe3cd, CHF,     RXY_a, HW,  r1_sr32, m2_32s, 0, 0, 0, cmps32)
+/* COMPARE IMMEDIATE HIGH */
+    C(0xcc0d, CIH,     RIL_a, HW,  r1_sr32, i2, 0, 0, 0, cmps32)
+
+/* COMPARE LOGICAL */
+    C(0x1500, CLR,     RR_a,  Z,   r1, r2, 0, 0, 0, cmpu32)
+    C(0x5500, CL,      RX_a,  Z,   r1, m2_32s, 0, 0, 0, cmpu32)
+    C(0xe355, CLY,     RXY_a, LD,  r1, m2_32s, 0, 0, 0, cmpu32)
+    C(0xb921, CLGR,    RRE,   Z,   r1, r2, 0, 0, 0, cmpu64)
+    C(0xb931, CLGFR,   RRE,   Z,   r1, r2_32u, 0, 0, 0, cmpu64)
+    C(0xe321, CLG,     RXY_a, Z,   r1, m2_64, 0, 0, 0, cmpu64)
+    C(0xe331, CLGF,    RXY_a, Z,   r1, m2_32u, 0, 0, 0, cmpu64)
+    C(0xd500, CLC,     SS_a,  Z,   la1, a2, 0, 0, clc, 0)
+/* COMPARE LOGICAL HIGH */
+    C(0xb9cf, CLHHR,   RRE,   HW,  r1_sr32, r2_sr32, 0, 0, 0, cmpu32)
+    C(0xb9df, CLHLR,   RRE,   HW,  r1_sr32, r2_o, 0, 0, 0, cmpu32)
+    C(0xe3cf, CLHF,    RXY_a, HW,  r1_sr32, m2_32s, 0, 0, 0, cmpu32)
+/* COMPARE LOGICAL IMMEDIATE */
+    C(0xc20f, CLFI,    RIL_a, EI,  r1, i2, 0, 0, 0, cmpu32)
+    C(0xc20e, CLGFI,   RIL_a, EI,  r1, i2_32u, 0, 0, 0, cmpu64)
+    C(0x9500, CLI,     SI,    Z,   m1_8u, i2_8u, 0, 0, 0, cmpu64)
+    C(0xeb55, CLIY,    SIY,   LD,  m1_8u, i2_8u, 0, 0, 0, cmpu64)
+    C(0xe555, CLHHSI,  SIL,   GIE, m1_16u, i2_16u, 0, 0, 0, cmpu64)
+    C(0xe55d, CLFHSI,  SIL,   GIE, m1_32u, i2_16u, 0, 0, 0, cmpu64)
+    C(0xe559, CLGHSI,  SIL,   GIE, m1_64, i2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL IMMEDIATE HIGH */
+    C(0xcc0f, CLIH,    RIL_a, HW,  r1_sr32, i2, 0, 0, 0, cmpu32)
+/* COMPARE LOGICAL RELATIVE LONG */
+    C(0xc60f, CLRL,    RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu32)
+    C(0xc60a, CLGRL,   RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmpu64)
+    C(0xc60e, CLGFRL,  RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64)
+    C(0xc607, CLHRL,   RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32)
+    C(0xc606, CLGHRL,  RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL LONG EXTENDED */
+    C(0xa900, CLCLE,   RS_a,  Z,   0, a2, 0, 0, clcle, 0)
+/* COMPARE LOGICAL CHARACTERS UNDER MASK */
+    C(0xbd00, CLM,     RS_b,  Z,   r1_o, a2, 0, 0, clm, 0)
+    C(0xeb21, CLMY,    RSY_b, LD,  r1_o, a2, 0, 0, clm, 0)
+    C(0xeb20, CLMH,    RSY_b, Z,   r1_sr32, a2, 0, 0, clm, 0)
+/* COMPARE LOGICAL STRING */
+    C(0xb25d, CLST,    RRE,   Z,   r1_o, r2_o, 0, 0, clst, 0)
+
+/* COMPARE AND BRANCH */
+    D(0xecf6, CRB,     RRS,   GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0)
+    D(0xece4, CGRB,    RRS,   GIE, r1_o, r2_o, 0, 0, cj, 0, 0)
+    D(0xec76, CRJ,     RIE_b, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0)
+    D(0xec64, CGRJ,    RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 0)
+    D(0xecfe, CIB,     RIS,   GIE, r1_32s, i2, 0, 0, cj, 0, 0)
+    D(0xecfc, CGIB,    RIS,   GIE, r1_o, i2, 0, 0, cj, 0, 0)
+    D(0xec7e, CIJ,     RIE_c, GIE, r1_32s, i2, 0, 0, cj, 0, 0)
+    D(0xec7c, CGIJ,    RIE_c, GIE, r1_o, i2, 0, 0, cj, 0, 0)
+/* COMPARE LOGICAL AND BRANCH */
+    D(0xecf7, CLRB,    RRS,   GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1)
+    D(0xece5, CLGRB,   RRS,   GIE, r1_o, r2_o, 0, 0, cj, 0, 1)
+    D(0xec77, CLRJ,    RIE_b, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1)
+    D(0xec65, CLGRJ,   RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 1)
+    D(0xecff, CLIB,    RIS,   GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1)
+    D(0xecfd, CLGIB,   RIS,   GIE, r1_o, i2_8u, 0, 0, cj, 0, 1)
+    D(0xec7f, CLIJ,    RIE_c, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1)
+    D(0xec7d, CLGIJ,   RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1)
+
+/* COMPARE AND SWAP */
+    D(0xba00, CS,      RS_a,  Z,   r3_32u, r1_32u, new, r1_32, cs, 0, 0)
+    D(0xeb14, CSY,     RSY_a, LD,  r3_32u, r1_32u, new, r1_32, cs, 0, 0)
+    D(0xeb30, CSG,     RSY_a, Z,   r3_o, r1_o, new, r1, cs, 0, 1)
+/* COMPARE DOUBLE AND SWAP */
+    D(0xbb00, CDS,     RS_a,  Z,   r3_D32, r1_D32, new, r1_D32, cs, 0, 1)
+    D(0xeb31, CDSY,    RSY_a, LD,  r3_D32, r1_D32, new, r1_D32, cs, 0, 1)
+    C(0xeb3e, CDSG,    RSY_a, Z,   0, 0, 0, 0, cdsg, 0)
+
+/* COMPARE AND TRAP */
+    D(0xb972, CRT,     RRF_c, GIE, r1_32s, r2_32s, 0, 0, ct, 0, 0)
+    D(0xb960, CGRT,    RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 0)
+    D(0xec72, CIT,     RIE_a, GIE, r1_32s, i2, 0, 0, ct, 0, 0)
+    D(0xec70, CGIT,    RIE_a, GIE, r1_o, i2, 0, 0, ct, 0, 0)
+/* COMPARE LOGICAL AND TRAP */
+    D(0xb973, CLRT,    RRF_c, GIE, r1_32u, r2_32u, 0, 0, ct, 0, 1)
+    D(0xb961, CLGRT,   RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1)
+    D(0xeb23, CLT,     RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1)
+    D(0xeb2b, CLGT,    RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1)
+    D(0xec73, CLFIT,   RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1)
+    D(0xec71, CLGIT,   RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1)
+
+/* CONVERT TO DECIMAL */
+    C(0x4e00, CVD,     RX_a,  Z,   r1_o, a2, 0, 0, cvd, 0)
+    C(0xe326, CVDY,    RXY_a, LD,  r1_o, a2, 0, 0, cvd, 0)
+/* CONVERT TO FIXED */
+    C(0xb398, CFEBR,   RRF_e, Z,   0, e2, new, r1_32, cfeb, 0)
+    C(0xb399, CFDBR,   RRF_e, Z,   0, f2_o, new, r1_32, cfdb, 0)
+    C(0xb39a, CFXBR,   RRF_e, Z,   0, x2_o, new, r1_32, cfxb, 0)
+    C(0xb3a8, CGEBR,   RRF_e, Z,   0, e2, r1, 0, cgeb, 0)
+    C(0xb3a9, CGDBR,   RRF_e, Z,   0, f2_o, r1, 0, cgdb, 0)
+    C(0xb3aa, CGXBR,   RRF_e, Z,   0, x2_o, r1, 0, cgxb, 0)
+/* CONVERT FROM FIXED */
+    C(0xb394, CEFBR,   RRF_e, Z,   0, r2_32s, new, e1, cegb, 0)
+    C(0xb395, CDFBR,   RRF_e, Z,   0, r2_32s, f1, 0, cdgb, 0)
+    C(0xb396, CXFBR,   RRF_e, Z,   0, r2_32s, x1, 0, cxgb, 0)
+    C(0xb3a4, CEGBR,   RRF_e, Z,   0, r2_o, new, e1, cegb, 0)
+    C(0xb3a5, CDGBR,   RRF_e, Z,   0, r2_o, f1, 0, cdgb, 0)
+    C(0xb3a6, CXGBR,   RRF_e, Z,   0, r2_o, x1, 0, cxgb, 0)
+/* CONVERT TO LOGICAL */
+    C(0xb39c, CLFEBR,  RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0)
+    C(0xb39d, CLFDBR,  RRF_e, FPE, 0, f2_o, new, r1_32, clfdb, 0)
+    C(0xb39e, CLFXBR,  RRF_e, FPE, 0, x2_o, new, r1_32, clfxb, 0)
+    C(0xb3ac, CLGEBR,  RRF_e, FPE, 0, e2, r1, 0, clgeb, 0)
+    C(0xb3ad, CLGDBR,  RRF_e, FPE, 0, f2_o, r1, 0, clgdb, 0)
+    C(0xb3ae, CLGXBR,  RRF_e, FPE, 0, x2_o, r1, 0, clgxb, 0)
+/* CONVERT FROM LOGICAL */
+    C(0xb390, CELFBR,  RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0)
+    C(0xb391, CDLFBR,  RRF_e, FPE, 0, r2_32u, f1, 0, cdlgb, 0)
+    C(0xb392, CXLFBR,  RRF_e, FPE, 0, r2_32u, x1, 0, cxlgb, 0)
+    C(0xb3a0, CELGBR,  RRF_e, FPE, 0, r2_o, new, e1, celgb, 0)
+    C(0xb3a1, CDLGBR,  RRF_e, FPE, 0, r2_o, f1, 0, cdlgb, 0)
+    C(0xb3a2, CXLGBR,  RRF_e, FPE, 0, r2_o, x1, 0, cxlgb, 0)
+
+/* DIVIDE */
+    C(0x1d00, DR,      RR_a,  Z,   r1_D32, r2_32s, new_P, r1_P32, divs32, 0)
+    C(0x5d00, D,       RX_a,  Z,   r1_D32, m2_32s, new_P, r1_P32, divs32, 0)
+    C(0xb30d, DEBR,    RRE,   Z,   e1, e2, new, e1, deb, 0)
+    C(0xb31d, DDBR,    RRE,   Z,   f1_o, f2_o, f1, 0, ddb, 0)
+    C(0xb34d, DXBR,    RRE,   Z,   0, x2_o, x1, 0, dxb, 0)
+    C(0xed0d, DEB,     RXE,   Z,   e1, m2_32u, new, e1, deb, 0)
+    C(0xed1d, DDB,     RXE,   Z,   f1_o, m2_64, f1, 0, ddb, 0)
+/* DIVIDE LOGICAL */
+    C(0xb997, DLR,     RRE,   Z,   r1_D32, r2_32u, new_P, r1_P32, divu32, 0)
+    C(0xe397, DL,      RXY_a, Z,   r1_D32, m2_32u, new_P, r1_P32, divu32, 0)
+    C(0xb987, DLGR,    RRE,   Z,   0, r2_o, r1_P, 0, divu64, 0)
+    C(0xe387, DLG,     RXY_a, Z,   0, m2_64, r1_P, 0, divu64, 0)
+/* DIVIDE SINGLE */
+    C(0xb90d, DSGR,    RRE,   Z,   r1p1, r2, r1_P, 0, divs64, 0)
+    C(0xb91d, DSGFR,   RRE,   Z,   r1p1, r2_32s, r1_P, 0, divs64, 0)
+    C(0xe30d, DSG,     RXY_a, Z,   r1p1, m2_64, r1_P, 0, divs64, 0)
+    C(0xe31d, DSGF,    RXY_a, Z,   r1p1, m2_32s, r1_P, 0, divs64, 0)
+
+/* EXCLUSIVE OR */
+    C(0x1700, XR,      RR_a,  Z,   r1, r2, new, r1_32, xor, nz32)
+    C(0xb9f7, XRK,     RRF_a, DO,  r2, r3, new, r1_32, xor, nz32)
+    C(0x5700, X,       RX_a,  Z,   r1, m2_32s, new, r1_32, xor, nz32)
+    C(0xe357, XY,      RXY_a, LD,  r1, m2_32s, new, r1_32, xor, nz32)
+    C(0xb982, XGR,     RRE,   Z,   r1, r2, r1, 0, xor, nz64)
+    C(0xb9e7, XGRK,    RRF_a, DO,  r2, r3, r1, 0, xor, nz64)
+    C(0xe382, XG,      RXY_a, Z,   r1, m2_64, r1, 0, xor, nz64)
+    C(0xd700, XC,      SS_a,  Z,   0, 0, 0, 0, xc, 0)
+/* EXCLUSIVE OR IMMEDIATE */
+    D(0xc006, XIHF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, xori, 0, 0x2020)
+    D(0xc007, XILF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, xori, 0, 0x2000)
+    C(0x9700, XI,      SI,    Z,   m1_8u, i2_8u, new, m1_8, xor, nz64)
+    C(0xeb57, XIY,     SIY,   LD,  m1_8u, i2_8u, new, m1_8, xor, nz64)
+
+/* EXECUTE */
+    C(0x4400, EX,      RX_a,  Z,   r1_o, a2, 0, 0, ex, 0)
+/* EXECUTE RELATIVE LONG */
+    C(0xc600, EXRL,    RIL_b, EE,  r1_o, ri2, 0, 0, ex, 0)
+
+/* EXTRACT ACCESS */
+    C(0xb24f, EAR,     RRE,   Z,   0, 0, new, r1_32, ear, 0)
+/* EXTRACT CPU ATTRIBUTE */
+    C(0xeb4c, ECAG,    RSY_a, GIE, 0, a2, r1, 0, ecag, 0)
+/* EXTRACT FPC */
+    C(0xb38c, EFPC,    RRE,   Z,   0, 0, new, r1_32, efpc, 0)
+/* EXTRACT PSW */
+    C(0xb98d, EPSW,    RRE,   Z,   0, 0, 0, 0, epsw, 0)
+
+/* FIND LEFTMOST ONE */
+    C(0xb983, FLOGR,   RRE,   EI,  0, r2_o, r1_P, 0, flogr, 0)
+
+/* INSERT CHARACTER */
+    C(0x4300, IC,      RX_a,  Z,   0, m2_8u, 0, r1_8, mov2, 0)
+    C(0xe373, ICY,     RXY_a, LD,  0, m2_8u, 0, r1_8, mov2, 0)
+/* INSERT CHARACTERS UNDER MASK */
+    D(0xbf00, ICM,     RS_b,  Z,   0, a2, r1, 0, icm, 0, 0)
+    D(0xeb81, ICMY,    RSY_b, LD,  0, a2, r1, 0, icm, 0, 0)
+    D(0xeb80, ICMH,    RSY_b, Z,   0, a2, r1, 0, icm, 0, 32)
+/* INSERT IMMEDIATE */
+    D(0xc008, IIHF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, insi, 0, 0x2020)
+    D(0xc009, IILF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, insi, 0, 0x2000)
+    D(0xa500, IIHH,    RI_a,  Z,   r1_o, i2_16u, r1, 0, insi, 0, 0x1030)
+    D(0xa501, IIHL,    RI_a,  Z,   r1_o, i2_16u, r1, 0, insi, 0, 0x1020)
+    D(0xa502, IILH,    RI_a,  Z,   r1_o, i2_16u, r1, 0, insi, 0, 0x1010)
+    D(0xa503, IILL,    RI_a,  Z,   r1_o, i2_16u, r1, 0, insi, 0, 0x1000)
+/* INSERT PROGRAM MASK */
+    C(0xb222, IPM,     RRE,   Z,   0, 0, r1, 0, ipm, 0)
+
+/* LOAD */
+    C(0x1800, LR,      RR_a,  Z,   0, r2_o, 0, cond_r1r2_32, mov2, 0)
+    C(0x5800, L,       RX_a,  Z,   0, a2, new, r1_32, ld32s, 0)
+    C(0xe358, LY,      RXY_a, LD,  0, a2, new, r1_32, ld32s, 0)
+    C(0xb904, LGR,     RRE,   Z,   0, r2_o, 0, r1, mov2, 0)
+    C(0xb914, LGFR,    RRE,   Z,   0, r2_32s, 0, r1, mov2, 0)
+    C(0xe304, LG,      RXY_a, Z,   0, a2, r1, 0, ld64, 0)
+    C(0xe314, LGF,     RXY_a, Z,   0, a2, r1, 0, ld32s, 0)
+    C(0x2800, LDR,     RR_a,  Z,   0, f2_o, 0, f1, mov2, 0)
+    C(0x6800, LD,      RX_a,  Z,   0, m2_64, 0, f1, mov2, 0)
+    C(0xed65, LDY,     RXY_a, LD,  0, m2_64, 0, f1, mov2, 0)
+    C(0x3800, LER,     RR_a,  Z,   0, e2, 0, cond_e1e2, mov2, 0)
+    C(0x7800, LE,      RX_a,  Z,   0, m2_32u, 0, e1, mov2, 0)
+    C(0xed64, LEY,     RXY_a, LD,  0, m2_32u, 0, e1, mov2, 0)
+    C(0xb365, LXR,     RRE,   Z,   0, x2_o, 0, x1, movx, 0)
+/* LOAD IMMEDIATE */
+    C(0xc001, LGFI,    RIL_a, EI,  0, i2, 0, r1, mov2, 0)
+/* LOAD RELATIVE LONG */
+    C(0xc40d, LRL,     RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0)
+    C(0xc408, LGRL,    RIL_b, GIE, 0, ri2, r1, 0, ld64, 0)
+    C(0xc40c, LGFRL,   RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0)
+/* LOAD ADDRESS */
+    C(0x4100, LA,      RX_a,  Z,   0, a2, 0, r1, mov2, 0)
+    C(0xe371, LAY,     RXY_a, LD,  0, a2, 0, r1, mov2, 0)
+/* LOAD ADDRESS EXTENDED */
+    C(0x5100, LAE,     RX_a,  Z,   0, a2, 0, r1, mov2e, 0)
+    C(0xe375, LAEY,    RXY_a, GIE, 0, a2, 0, r1, mov2e, 0)
+/* LOAD ADDRESS RELATIVE LONG */
+    C(0xc000, LARL,    RIL_b, Z,   0, ri2, 0, r1, mov2, 0)
+/* LOAD AND ADD */
+    C(0xebf8, LAA,     RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, adds32)
+    C(0xebe8, LAAG,    RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, adds64)
+/* LOAD AND ADD LOGICAL */
+    C(0xebfa, LAAL,    RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, addu32)
+    C(0xebea, LAALG,   RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, addu64)
+/* LOAD AND AND */
+    C(0xebf4, LAN,     RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, and, nz32)
+    C(0xebe4, LANG,    RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, and, nz64)
+/* LOAD AND EXCLUSIVE OR */
+    C(0xebf7, LAX,     RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, xor, nz32)
+    C(0xebe7, LAXG,    RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, xor, nz64)
+/* LOAD AND OR */
+    C(0xebf6, LAO,     RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, or, nz32)
+    C(0xebe6, LAOG,    RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, or, nz64)
+/* LOAD AND TEST */
+    C(0x1200, LTR,     RR_a,  Z,   0, r2_o, 0, cond_r1r2_32, mov2, s32)
+    C(0xb902, LTGR,    RRE,   Z,   0, r2_o, 0, r1, mov2, s64)
+    C(0xb912, LTGFR,   RRE,   Z,   0, r2_32s, 0, r1, mov2, s64)
+    C(0xe312, LT,      RXY_a, EI,  0, a2, new, r1_32, ld32s, s64)
+    C(0xe302, LTG,     RXY_a, EI,  0, a2, r1, 0, ld64, s64)
+    C(0xe332, LTGF,    RXY_a, GIE, 0, a2, r1, 0, ld32s, s64)
+    C(0xb302, LTEBR,   RRE,   Z,   0, e2, 0, cond_e1e2, mov2, f32)
+    C(0xb312, LTDBR,   RRE,   Z,   0, f2_o, 0, f1, mov2, f64)
+    C(0xb342, LTXBR,   RRE,   Z,   0, x2_o, 0, x1, movx, f128)
+/* LOAD AND TRAP */
+    C(0xe39f, LAT,     RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0)
+    C(0xe385, LGAT,    RXY_a, LAT, 0, a2, r1, 0, lgat, 0)
+/* LOAD BYTE */
+    C(0xb926, LBR,     RRE,   EI,  0, r2_8s, 0, r1_32, mov2, 0)
+    C(0xb906, LGBR,    RRE,   EI,  0, r2_8s, 0, r1, mov2, 0)
+    C(0xe376, LB,      RXY_a, LD,  0, a2, new, r1_32, ld8s, 0)
+    C(0xe377, LGB,     RXY_a, LD,  0, a2, r1, 0, ld8s, 0)
+/* LOAD BYTE HIGH */
+    C(0xe3c0, LBH,     RXY_a, HW,  0, a2, new, r1_32h, ld8s, 0)
+/* LOAD COMPLEMENT */
+    C(0x1300, LCR,     RR_a,  Z,   0, r2, new, r1_32, neg, neg32)
+    C(0xb903, LCGR,    RRE,   Z,   0, r2, r1, 0, neg, neg64)
+    C(0xb913, LCGFR,   RRE,   Z,   0, r2_32s, r1, 0, neg, neg64)
+    C(0xb303, LCEBR,   RRE,   Z,   0, e2, new, e1, negf32, f32)
+    C(0xb313, LCDBR,   RRE,   Z,   0, f2_o, f1, 0, negf64, f64)
+    C(0xb343, LCXBR,   RRE,   Z,   0, x2_o, x1, 0, negf128, f128)
+    C(0xb373, LCDFR,   RRE,   FPSSH, 0, f2_o, f1, 0, negf64, 0)
+/* LOAD HALFWORD */
+    C(0xb927, LHR,     RRE,   EI,  0, r2_16s, 0, r1_32, mov2, 0)
+    C(0xb907, LGHR,    RRE,   EI,  0, r2_16s, 0, r1, mov2, 0)
+    C(0x4800, LH,      RX_a,  Z,   0, a2, new, r1_32, ld16s, 0)
+    C(0xe378, LHY,     RXY_a, LD,  0, a2, new, r1_32, ld16s, 0)
+    C(0xe315, LGH,     RXY_a, Z,   0, a2, r1, 0, ld16s, 0)
+/* LOAD HALFWORD HIGH */
+    C(0xe3c4, LHH,     RXY_a, HW,  0, a2, new, r1_32h, ld16s, 0)
+/* LOAD HALFWORD IMMEDIATE */
+    C(0xa708, LHI,     RI_a,  Z,   0, i2, 0, r1_32, mov2, 0)
+    C(0xa709, LGHI,    RI_a,  Z,   0, i2, 0, r1, mov2, 0)
+/* LOAD HALFWORD RELATIVE LONG */
+    C(0xc405, LHRL,    RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0)
+    C(0xc404, LGHRL,   RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0)
+/* LOAD HIGH */
+    C(0xe3ca, LFH,     RXY_a, HW,  0, a2, new, r1_32h, ld32u, 0)
+/* LOAD HIGH AND TRAP */
+    C(0xe3c8, LFHAT,   RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0)
+/* LOAD LOGICAL */
+    C(0xb916, LLGFR,   RRE,   Z,   0, r2_32u, 0, r1, mov2, 0)
+    C(0xe316, LLGF,    RXY_a, Z,   0, a2, r1, 0, ld32u, 0)
+/* LOAD LOGICAL AND TRAP */
+    C(0xe39d, LLGFAT,  RXY_a, LAT, 0, a2, r1, 0, llgfat, 0)
+/* LOAD LOGICAL RELATIVE LONG */
+    C(0xc40e, LLGFRL,  RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0)
+/* LOAD LOGICAL CHARACTER */
+    C(0xb994, LLCR,    RRE,   EI,  0, r2_8u, 0, r1_32, mov2, 0)
+    C(0xb984, LLGCR,   RRE,   EI,  0, r2_8u, 0, r1, mov2, 0)
+    C(0xe394, LLC,     RXY_a, EI,  0, a2, new, r1_32, ld8u, 0)
+    C(0xe390, LLGC,    RXY_a, Z,   0, a2, r1, 0, ld8u, 0)
+/* LOAD LOGICAL CHARACTER HIGH */
+    C(0xe3c2, LLCH,    RXY_a, HW,  0, a2, new, r1_32h, ld8u, 0)
+/* LOAD LOGICAL HALFWORD */
+    C(0xb995, LLHR,    RRE,   EI,  0, r2_16u, 0, r1_32, mov2, 0)
+    C(0xb985, LLGHR,   RRE,   EI,  0, r2_16u, 0, r1, mov2, 0)
+    C(0xe395, LLH,     RXY_a, EI,  0, a2, new, r1_32, ld16u, 0)
+    C(0xe391, LLGH,    RXY_a, Z,   0, a2, r1, 0, ld16u, 0)
+/* LOAD LOGICAL HALFWORD HIGH */
+    C(0xe3c6, LLHH,    RXY_a, HW,  0, a2, new, r1_32h, ld16u, 0)
+/* LOAD LOGICAL HALFWORD RELATIVE LONG */
+    C(0xc402, LLHRL,   RIL_b, GIE, 0, ri2, new, r1_32, ld16u, 0)
+    C(0xc406, LLGHRL,  RIL_b, GIE, 0, ri2, r1, 0, ld16u, 0)
+/* LOAD LOGICAL IMMEDIATE */
+    D(0xc00e, LLIHF,   RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 32)
+    D(0xc00f, LLILF,   RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 0)
+    D(0xa50c, LLIHH,   RI_a,  Z,  0, i2_16u_shl, 0, r1, mov2, 0, 48)
+    D(0xa50d, LLIHL,   RI_a,  Z,  0, i2_16u_shl, 0, r1, mov2, 0, 32)
+    D(0xa50e, LLILH,   RI_a,  Z,  0, i2_16u_shl, 0, r1, mov2, 0, 16)
+    D(0xa50f, LLILL,   RI_a,  Z,  0, i2_16u_shl, 0, r1, mov2, 0, 0)
+/* LOAD LOGICAL THIRTY ONE BITS */
+    C(0xb917, LLGTR,   RRE,   Z,  0, r2_o, r1, 0, llgt, 0)
+    C(0xe317, LLGT,    RXY_a, Z,  0, m2_32u, r1, 0, llgt, 0)
+/* LOAD LOGICAL THIRTY ONE BITS AND TRAP */
+    C(0xe39c, LLGTAT,  RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0)
+
+/* LOAD FPR FROM GR */
+    C(0xb3c1, LDGR,    RRE,   FPRGR, 0, r2_o, 0, f1, mov2, 0)
+/* LOAD GR FROM FPR */
+    C(0xb3cd, LGDR,    RRE,   FPRGR, 0, f2_o, 0, r1, mov2, 0)
+/* LOAD NEGATIVE */
+    C(0x1100, LNR,     RR_a,  Z,   0, r2_32s, new, r1_32, nabs, nabs32)
+    C(0xb901, LNGR,    RRE,   Z,   0, r2, r1, 0, nabs, nabs64)
+    C(0xb911, LNGFR,   RRE,   Z,   0, r2_32s, r1, 0, nabs, nabs64)
+    C(0xb301, LNEBR,   RRE,   Z,   0, e2, new, e1, nabsf32, f32)
+    C(0xb311, LNDBR,   RRE,   Z,   0, f2_o, f1, 0, nabsf64, f64)
+    C(0xb341, LNXBR,   RRE,   Z,   0, x2_o, x1, 0, nabsf128, f128)
+    C(0xb371, LNDFR,   RRE,   FPSSH, 0, f2_o, f1, 0, nabsf64, 0)
+/* LOAD ON CONDITION */
+    C(0xb9f2, LOCR,    RRF_c, LOC, r1, r2, new, r1_32, loc, 0)
+    C(0xb9e2, LOCGR,   RRF_c, LOC, r1, r2, r1, 0, loc, 0)
+    C(0xebf2, LOC,     RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0)
+    C(0xebe2, LOCG,    RSY_b, LOC, r1, m2_64, r1, 0, loc, 0)
+/* LOAD PAIR DISJOINT TODO */
+/* LOAD POSITIVE */
+    C(0x1000, LPR,     RR_a,  Z,   0, r2_32s, new, r1_32, abs, abs32)
+    C(0xb900, LPGR,    RRE,   Z,   0, r2, r1, 0, abs, abs64)
+    C(0xb910, LPGFR,   RRE,   Z,   0, r2_32s, r1, 0, abs, abs64)
+    C(0xb300, LPEBR,   RRE,   Z,   0, e2, new, e1, absf32, f32)
+    C(0xb310, LPDBR,   RRE,   Z,   0, f2_o, f1, 0, absf64, f64)
+    C(0xb340, LPXBR,   RRE,   Z,   0, x2_o, x1, 0, absf128, f128)
+    C(0xb370, LPDFR,   RRE,   FPSSH, 0, f2_o, f1, 0, absf64, 0)
+/* LOAD REVERSED */
+    C(0xb91f, LRVR,    RRE,   Z,   0, r2_32u, new, r1_32, rev32, 0)
+    C(0xb90f, LRVGR,   RRE,   Z,   0, r2_o, r1, 0, rev64, 0)
+    C(0xe31f, LRVH,    RXY_a, Z,   0, m2_16u, new, r1_16, rev16, 0)
+    C(0xe31e, LRV,     RXY_a, Z,   0, m2_32u, new, r1_32, rev32, 0)
+    C(0xe30f, LRVG,    RXY_a, Z,   0, m2_64, r1, 0, rev64, 0)
+/* LOAD ZERO */
+    C(0xb374, LZER,    RRE,   Z,   0, 0, 0, e1, zero, 0)
+    C(0xb375, LZDR,    RRE,   Z,   0, 0, 0, f1, zero, 0)
+    C(0xb376, LZXR,    RRE,   Z,   0, 0, 0, x1, zero2, 0)
+
+/* LOAD FPC */
+    C(0xb29d, LFPC,    S,     Z,   0, m2_32u, 0, 0, sfpc, 0)
+/* LOAD FPC AND SIGNAL */
+    C(0xb2bd, LFAS,    S,     IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0)
+/* LOAD FP INTEGER */
+    C(0xb357, FIEBR,   RRF_e, Z,   0, e2, new, e1, fieb, 0)
+    C(0xb35f, FIDBR,   RRF_e, Z,   0, f2_o, f1, 0, fidb, 0)
+    C(0xb347, FIXBR,   RRF_e, Z,   0, x2_o, x1, 0, fixb, 0)
+
+/* LOAD LENGTHENED */
+    C(0xb304, LDEBR,   RRE,   Z,   0, e2, f1, 0, ldeb, 0)
+    C(0xb305, LXDBR,   RRE,   Z,   0, f2_o, x1, 0, lxdb, 0)
+    C(0xb306, LXEBR,   RRE,   Z,   0, e2, x1, 0, lxeb, 0)
+    C(0xed04, LDEB,    RXE,   Z,   0, m2_32u, f1, 0, ldeb, 0)
+    C(0xed05, LXDB,    RXE,   Z,   0, m2_64, x1, 0, lxdb, 0)
+    C(0xed06, LXEB,    RXE,   Z,   0, m2_32u, x1, 0, lxeb, 0)
+/* LOAD ROUNDED */
+    C(0xb344, LEDBR,   RRE,   Z,   0, f2_o, new, e1, ledb, 0)
+    C(0xb345, LDXBR,   RRE,   Z,   0, x2_o, f1, 0, ldxb, 0)
+    C(0xb346, LEXBR,   RRE,   Z,   0, x2_o, new, e1, lexb, 0)
+
+/* LOAD MULTIPLE */
+    C(0x9800, LM,      RS_a,  Z,   0, a2, 0, 0, lm32, 0)
+    C(0xeb98, LMY,     RSY_a, LD,  0, a2, 0, 0, lm32, 0)
+    C(0xeb04, LMG,     RSY_a, Z,   0, a2, 0, 0, lm64, 0)
+/* LOAD MULTIPLE HIGH */
+    C(0xeb96, LMH,     RSY_a, Z,   0, a2, 0, 0, lmh, 0)
+/* LOAD ACCESS MULTIPLE */
+    C(0x9a00, LAM,     RS_a,  Z,   0, a2, 0, 0, lam, 0)
+    C(0xeb9a, LAMY,    RSY_a, LD,  0, a2, 0, 0, lam, 0)
+
+/* MOVE */
+    C(0xd200, MVC,     SS_a,  Z,   la1, a2, 0, 0, mvc, 0)
+    C(0xe544, MVHHI,   SIL,   GIE, la1, i2, 0, m1_16, mov2, 0)
+    C(0xe54c, MVHI,    SIL,   GIE, la1, i2, 0, m1_32, mov2, 0)
+    C(0xe548, MVGHI,   SIL,   GIE, la1, i2, 0, m1_64, mov2, 0)
+    C(0x9200, MVI,     SI,    Z,   la1, i2, 0, m1_8, mov2, 0)
+    C(0xeb52, MVIY,    SIY,   LD,  la1, i2, 0, m1_8, mov2, 0)
+/* MOVE LONG */
+    C(0x0e00, MVCL,    RR_a,  Z,   0, 0, 0, 0, mvcl, 0)
+/* MOVE LONG EXTENDED */
+    C(0xa800, MVCLE,   RS_a,  Z,   0, a2, 0, 0, mvcle, 0)
+/* MOVE PAGE */
+    C(0xb254, MVPG,    RRE,   Z,   r1_o, r2_o, 0, 0, mvpg, 0)
+/* MOVE STRING */
+    C(0xb255, MVST,    RRE,   Z,   r1_o, r2_o, 0, 0, mvst, 0)
+
+/* MULTIPLY */
+    C(0x1c00, MR,      RR_a,  Z,   r1p1_32s, r2_32s, new, r1_D32, mul, 0)
+    C(0x5c00, M,       RX_a,  Z,   r1p1_32s, m2_32s, new, r1_D32, mul, 0)
+    C(0xe35c, MFY,     RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0)
+    C(0xb317, MEEBR,   RRE,   Z,   e1, e2, new, e1, meeb, 0)
+    C(0xb31c, MDBR,    RRE,   Z,   f1_o, f2_o, f1, 0, mdb, 0)
+    C(0xb34c, MXBR,    RRE,   Z,   0, x2_o, x1, 0, mxb, 0)
+    C(0xb30c, MDEBR,   RRE,   Z,   f1_o, e2, f1, 0, mdeb, 0)
+    C(0xb307, MXDBR,   RRE,   Z,   0, f2_o, x1, 0, mxdb, 0)
+    C(0xed17, MEEB,    RXE,   Z,   e1, m2_32u, new, e1, meeb, 0)
+    C(0xed1c, MDB,     RXE,   Z,   f1_o, m2_64, f1, 0, mdb, 0)
+    C(0xed0c, MDEB,    RXE,   Z,   f1_o, m2_32u, f1, 0, mdeb, 0)
+    C(0xed07, MXDB,    RXE,   Z,   0, m2_64, x1, 0, mxdb, 0)
+/* MULTIPLY HALFWORD */
+    C(0x4c00, MH,      RX_a,  Z,   r1_o, m2_16s, new, r1_32, mul, 0)
+    C(0xe37c, MHY,     RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0)
+/* MULTIPLY HALFWORD IMMEDIATE */
+    C(0xa70c, MHI,     RI_a,  Z,   r1_o, i2, new, r1_32, mul, 0)
+    C(0xa70d, MGHI,    RI_a,  Z,   r1_o, i2, r1, 0, mul, 0)
+/* MULTIPLY LOGICAL */
+    C(0xb996, MLR,     RRE,   Z,   r1p1_32u, r2_32u, new, r1_D32, mul, 0)
+    C(0xe396, ML,      RXY_a, Z,   r1p1_32u, m2_32u, new, r1_D32, mul, 0)
+    C(0xb986, MLGR,    RRE,   Z,   r1p1, r2_o, r1_P, 0, mul128, 0)
+    C(0xe386, MLG,     RXY_a, Z,   r1p1, m2_64, r1_P, 0, mul128, 0)
+/* MULTIPLY SINGLE */
+    C(0xb252, MSR,     RRE,   Z,   r1_o, r2_o, new, r1_32, mul, 0)
+    C(0x7100, MS,      RX_a,  Z,   r1_o, m2_32s, new, r1_32, mul, 0)
+    C(0xe351, MSY,     RXY_a, LD,  r1_o, m2_32s, new, r1_32, mul, 0)
+    C(0xb90c, MSGR,    RRE,   Z,   r1_o, r2_o, r1, 0, mul, 0)
+    C(0xb91c, MSGFR,   RRE,   Z,   r1_o, r2_32s, r1, 0, mul, 0)
+    C(0xe30c, MSG,     RXY_a, Z,   r1_o, m2_64, r1, 0, mul, 0)
+    C(0xe31c, MSGF,    RXY_a, Z,   r1_o, m2_32s, r1, 0, mul, 0)
+/* MULTIPLY SINGLE IMMEDIATE */
+    C(0xc201, MSFI,    RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0)
+    C(0xc200, MSGFI,   RIL_a, GIE, r1_o, i2, r1, 0, mul, 0)
+
+/* MULTIPLY AND ADD */
+    C(0xb30e, MAEBR,   RRD,   Z,   e1, e2, new, e1, maeb, 0)
+    C(0xb31e, MADBR,   RRD,   Z,   f1_o, f2_o, f1, 0, madb, 0)
+    C(0xed0e, MAEB,    RXF,   Z,   e1, m2_32u, new, e1, maeb, 0)
+    C(0xed1e, MADB,    RXF,   Z,   f1_o, m2_64, f1, 0, madb, 0)
+/* MULTIPLY AND SUBTRACT */
+    C(0xb30f, MSEBR,   RRD,   Z,   e1, e2, new, e1, mseb, 0)
+    C(0xb31f, MSDBR,   RRD,   Z,   f1_o, f2_o, f1, 0, msdb, 0)
+    C(0xed0f, MSEB,    RXF,   Z,   e1, m2_32u, new, e1, mseb, 0)
+    C(0xed1f, MSDB,    RXF,   Z,   f1_o, m2_64, f1, 0, msdb, 0)
+
+/* OR */
+    C(0x1600, OR,      RR_a,  Z,   r1, r2, new, r1_32, or, nz32)
+    C(0xb9f6, ORK,     RRF_a, DO,  r2, r3, new, r1_32, or, nz32)
+    C(0x5600, O,       RX_a,  Z,   r1, m2_32s, new, r1_32, or, nz32)
+    C(0xe356, OY,      RXY_a, LD,  r1, m2_32s, new, r1_32, or, nz32)
+    C(0xb981, OGR,     RRE,   Z,   r1, r2, r1, 0, or, nz64)
+    C(0xb9e6, OGRK,    RRF_a, DO,  r2, r3, r1, 0, or, nz64)
+    C(0xe381, OG,      RXY_a, Z,   r1, m2_64, r1, 0, or, nz64)
+    C(0xd600, OC,      SS_a,  Z,   la1, a2, 0, 0, oc, 0)
+/* OR IMMEDIATE */
+    D(0xc00c, OIHF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, ori, 0, 0x2020)
+    D(0xc00d, OILF,    RIL_a, EI,  r1_o, i2_32u, r1, 0, ori, 0, 0x2000)
+    D(0xa508, OIHH,    RI_a,  Z,   r1_o, i2_16u, r1, 0, ori, 0, 0x1030)
+    D(0xa509, OIHL,    RI_a,  Z,   r1_o, i2_16u, r1, 0, ori, 0, 0x1020)
+    D(0xa50a, OILH,    RI_a,  Z,   r1_o, i2_16u, r1, 0, ori, 0, 0x1010)
+    D(0xa50b, OILL,    RI_a,  Z,   r1_o, i2_16u, r1, 0, ori, 0, 0x1000)
+    C(0x9600, OI,      SI,    Z,   m1_8u, i2_8u, new, m1_8, or, nz64)
+    C(0xeb56, OIY,     SIY,   LD,  m1_8u, i2_8u, new, m1_8, or, nz64)
+
+/* PREFETCH */
+    /* Implemented as nops of course.  */
+    C(0xe336, PFD,     RXY_b, GIE, 0, 0, 0, 0, 0, 0)
+    C(0xc602, PFDRL,   RIL_c, GIE, 0, 0, 0, 0, 0, 0)
+
+/* POPULATION COUNT */
+    C(0xb9e1, POPCNT,  RRE,   PC,  0, r2_o, r1, 0, popcnt, nz64)
+
+/* ROTATE LEFT SINGLE LOGICAL */
+    C(0xeb1d, RLL,     RSY_a, Z,   r3_o, sh32, new, r1_32, rll32, 0)
+    C(0xeb1c, RLLG,    RSY_a, Z,   r3_o, sh64, r1, 0, rll64, 0)
+
+/* ROTATE THEN INSERT SELECTED BITS */
+    C(0xec55, RISBG,   RIE_f, GIE, 0, r2, r1, 0, risbg, s64)
+    C(0xec59, RISBGN,  RIE_f, MIE, 0, r2, r1, 0, risbg, 0)
+    C(0xec5d, RISBHG,  RIE_f, HW,  0, r2, r1, 0, risbg, 0)
+    C(0xec51, RISBLG,  RIE_f, HW,  0, r2, r1, 0, risbg, 0)
+/* ROTATE THEN <OP> SELECTED BITS */
+    C(0xec54, RNSBG,   RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+    C(0xec56, ROSBG,   RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+    C(0xec57, RXSBG,   RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+
+/* SEARCH STRING */
+    C(0xb25e, SRST,    RRE,   Z,   r1_o, r2_o, 0, 0, srst, 0)
+
+/* SET ACCESS */
+    C(0xb24e, SAR,     RRE,   Z,   0, r2_o, 0, 0, sar, 0)
+/* SET ADDRESSING MODE */
+    D(0x010c, SAM24,   E,     Z,   0, 0, 0, 0, sam, 0, 0)
+    D(0x010d, SAM31,   E,     Z,   0, 0, 0, 0, sam, 0, 1)
+    D(0x010e, SAM64,   E,     Z,   0, 0, 0, 0, sam, 0, 3)
+/* SET FPC */
+    C(0xb384, SFPC,    RRE,   Z,   0, r1_o, 0, 0, sfpc, 0)
+/* SET FPC AND SIGNAL */
+    C(0xb385, SFASR,   RRE,   IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0)
+/* SET BFP ROUNDING MODE */
+    C(0xb299, SRNM,    S,     Z,   0, 0, 0, 0, srnm, 0)
+    C(0xb2b8, SRNMB,   S,     FPE, 0, 0, 0, 0, srnm, 0)
+/* SET DFP ROUNDING MODE */
+    C(0xb2b9, SRNMT,   S,     DFPR, 0, 0, 0, 0, srnm, 0)
+
+/* SHIFT LEFT SINGLE */
+    D(0x8b00, SLA,     RS_a,  Z,   r1, sh32, new, r1_32, sla, 0, 31)
+    D(0xebdd, SLAK,    RSY_a, DO,  r3, sh32, new, r1_32, sla, 0, 31)
+    D(0xeb0b, SLAG,    RSY_a, Z,   r3, sh64, r1, 0, sla, 0, 63)
+/* SHIFT LEFT SINGLE LOGICAL */
+    C(0x8900, SLL,     RS_a,  Z,   r1_o, sh32, new, r1_32, sll, 0)
+    C(0xebdf, SLLK,    RSY_a, DO,  r3_o, sh32, new, r1_32, sll, 0)
+    C(0xeb0d, SLLG,    RSY_a, Z,   r3_o, sh64, r1, 0, sll, 0)
+/* SHIFT RIGHT SINGLE */
+    C(0x8a00, SRA,     RS_a,  Z,   r1_32s, sh32, new, r1_32, sra, s32)
+    C(0xebdc, SRAK,    RSY_a, DO,  r3_32s, sh32, new, r1_32, sra, s32)
+    C(0xeb0a, SRAG,    RSY_a, Z,   r3_o, sh64, r1, 0, sra, s64)
+/* SHIFT RIGHT SINGLE LOGICAL */
+    C(0x8800, SRL,     RS_a,  Z,   r1_32u, sh32, new, r1_32, srl, 0)
+    C(0xebde, SRLK,    RSY_a, DO,  r3_32u, sh32, new, r1_32, srl, 0)
+    C(0xeb0c, SRLG,    RSY_a, Z,   r3_o, sh64, r1, 0, srl, 0)
+/* SHIFT LEFT DOUBLE */
+    D(0x8f00, SLDA,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, sla, 0, 31)
+/* SHIFT LEFT DOUBLE LOGICAL */
+    C(0x8d00, SLDL,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, sll, 0)
+/* SHIFT RIGHT DOUBLE */
+    C(0x8e00, SRDA,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, sra, s64)
+/* SHIFT RIGHT DOUBLE LOGICAL */
+    C(0x8c00, SRDL,    RS_a,  Z,   r1_D32, sh64, new, r1_D32, srl, 0)
+
+/* SQUARE ROOT */
+    C(0xb314, SQEBR,   RRE,   Z,   0, e2, new, e1, sqeb, 0)
+    C(0xb315, SQDBR,   RRE,   Z,   0, f2_o, f1, 0, sqdb, 0)
+    C(0xb316, SQXBR,   RRE,   Z,   0, x2_o, x1, 0, sqxb, 0)
+    C(0xed14, SQEB,    RXE,   Z,   0, m2_32u, new, e1, sqeb, 0)
+    C(0xed15, SQDB,    RXE,   Z,   0, m2_64, f1, 0, sqdb, 0)
+
+/* STORE */
+    C(0x5000, ST,      RX_a,  Z,   r1_o, a2, 0, 0, st32, 0)
+    C(0xe350, STY,     RXY_a, LD,  r1_o, a2, 0, 0, st32, 0)
+    C(0xe324, STG,     RXY_a, Z,   r1_o, a2, 0, 0, st64, 0)
+    C(0x6000, STD,     RX_a,  Z,   f1_o, a2, 0, 0, st64, 0)
+    C(0xed67, STDY,    RXY_a, LD,  f1_o, a2, 0, 0, st64, 0)
+    C(0x7000, STE,     RX_a,  Z,   e1, a2, 0, 0, st32, 0)
+    C(0xed66, STEY,    RXY_a, LD,  e1, a2, 0, 0, st32, 0)
+/* STORE RELATIVE LONG */
+    C(0xc40f, STRL,    RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0)
+    C(0xc40b, STGRL,   RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0)
+/* STORE CHARACTER */
+    C(0x4200, STC,     RX_a,  Z,   r1_o, a2, 0, 0, st8, 0)
+    C(0xe372, STCY,    RXY_a, LD,  r1_o, a2, 0, 0, st8, 0)
+/* STORE CHARACTER HIGH */
+    C(0xe3c3, STCH,    RXY_a, HW,  r1_sr32, a2, 0, 0, st8, 0)
+/* STORE CHARACTERS UNDER MASK */
+    D(0xbe00, STCM,    RS_b,  Z,   r1_o, a2, 0, 0, stcm, 0, 0)
+    D(0xeb2d, STCMY,   RSY_b, LD,  r1_o, a2, 0, 0, stcm, 0, 0)
+    D(0xeb2c, STCMH,   RSY_b, Z,   r1_o, a2, 0, 0, stcm, 0, 32)
+/* STORE HALFWORD */
+    C(0x4000, STH,     RX_a,  Z,   r1_o, a2, 0, 0, st16, 0)
+    C(0xe370, STHY,    RXY_a, LD,  r1_o, a2, 0, 0, st16, 0)
+/* STORE HALFWORD HIGH */
+    C(0xe3c7, STHH,    RXY_a, HW,  r1_sr32, a2, 0, 0, st16, 0)
+/* STORE HALFWORD RELATIVE LONG */
+    C(0xc407, STHRL,   RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0)
+/* STORE HIGH */
+    C(0xe3cb, STFH,    RXY_a, HW,  r1_sr32, a2, 0, 0, st32, 0)
+/* STORE ON CONDITION */
+    D(0xebf3, STOC,    RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0)
+    D(0xebe3, STOCG,   RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1)
+/* STORE REVERSED */
+    C(0xe33f, STRVH,   RXY_a, Z,   la2, r1_16u, new, m1_16, rev16, 0)
+    C(0xe33e, STRV,    RXY_a, Z,   la2, r1_32u, new, m1_32, rev32, 0)
+    C(0xe32f, STRVG,   RXY_a, Z,   la2, r1_o, new, m1_64, rev64, 0)
+
+/* STORE FPC */
+    C(0xb29c, STFPC,   S,     Z,   0, a2, new, m2_32, efpc, 0)
+
+/* STORE MULTIPLE */
+    D(0x9000, STM,     RS_a,  Z,   0, a2, 0, 0, stm, 0, 4)
+    D(0xeb90, STMY,    RSY_a, LD,  0, a2, 0, 0, stm, 0, 4)
+    D(0xeb24, STMG,    RSY_a, Z,   0, a2, 0, 0, stm, 0, 8)
+/* STORE MULTIPLE HIGH */
+    C(0xeb26, STMH,    RSY_a, Z,   0, a2, 0, 0, stmh, 0)
+/* STORE ACCESS MULTIPLE */
+    C(0x9b00, STAM,    RS_a,  Z,   0, a2, 0, 0, stam, 0)
+    C(0xeb9b, STAMY,   RSY_a, LD,  0, a2, 0, 0, stam, 0)
+
+/* SUBTRACT */
+    C(0x1b00, SR,      RR_a,  Z,   r1, r2, new, r1_32, sub, subs32)
+    C(0xb9f9, SRK,     RRF_a, DO,  r2, r3, new, r1_32, sub, subs32)
+    C(0x5b00, S,       RX_a,  Z,   r1, m2_32s, new, r1_32, sub, subs32)
+    C(0xe35b, SY,      RXY_a, LD,  r1, m2_32s, new, r1_32, sub, subs32)
+    C(0xb909, SGR,     RRE,   Z,   r1, r2, r1, 0, sub, subs64)
+    C(0xb919, SGFR,    RRE,   Z,   r1, r2_32s, r1, 0, sub, subs64)
+    C(0xb9e9, SGRK,    RRF_a, DO,  r2, r3, r1, 0, sub, subs64)
+    C(0xe309, SG,      RXY_a, Z,   r1, m2_64, r1, 0, sub, subs64)
+    C(0xe319, SGF,     RXY_a, Z,   r1, m2_32s, r1, 0, sub, subs64)
+    C(0xb30b, SEBR,    RRE,   Z,   e1, e2, new, e1, seb, f32)
+    C(0xb31b, SDBR,    RRE,   Z,   f1_o, f2_o, f1, 0, sdb, f64)
+    C(0xb34b, SXBR,    RRE,   Z,   0, x2_o, x1, 0, sxb, f128)
+    C(0xed0b, SEB,     RXE,   Z,   e1, m2_32u, new, e1, seb, f32)
+    C(0xed1b, SDB,     RXE,   Z,   f1_o, m2_64, f1, 0, sdb, f64)
+/* SUBTRACT HALFWORD */
+    C(0x4b00, SH,      RX_a,  Z,   r1, m2_16s, new, r1_32, sub, subs32)
+    C(0xe37b, SHY,     RXY_a, LD,  r1, m2_16s, new, r1_32, sub, subs32)
+/* SUBTRACT HIGH */
+    C(0xb9c9, SHHHR,   RRF_a, HW,  r2_sr32, r3_sr32, new, r1_32h, sub, subs32)
+    C(0xb9d9, SHHLR,   RRF_a, HW,  r2_sr32, r3, new, r1_32h, sub, subs32)
+/* SUBTRACT LOGICAL */
+    C(0x1f00, SLR,     RR_a,  Z,   r1, r2, new, r1_32, sub, subu32)
+    C(0xb9fb, SLRK,    RRF_a, DO,  r2, r3, new, r1_32, sub, subu32)
+    C(0x5f00, SL,      RX_a,  Z,   r1, m2_32u, new, r1_32, sub, subu32)
+    C(0xe35f, SLY,     RXY_a, LD,  r1, m2_32u, new, r1_32, sub, subu32)
+    C(0xb90b, SLGR,    RRE,   Z,   r1, r2, r1, 0, sub, subu64)
+    C(0xb91b, SLGFR,   RRE,   Z,   r1, r2_32u, r1, 0, sub, subu64)
+    C(0xb9eb, SLGRK,   RRF_a, DO,  r2, r3, r1, 0, sub, subu64)
+    C(0xe30b, SLG,     RXY_a, Z,   r1, m2_64, r1, 0, sub, subu64)
+    C(0xe31b, SLGF,    RXY_a, Z,   r1, m2_32u, r1, 0, sub, subu64)
+/* SUBTRACT LOGICAL HIGH */
+    C(0xb9cb, SLHHHR,  RRF_a, HW,  r2_sr32, r3_sr32, new, r1_32h, sub, subu32)
+    C(0xb9db, SLHHLR,  RRF_a, HW,  r2_sr32, r3, new, r1_32h, sub, subu32)
+/* SUBTRACT LOGICAL IMMEDIATE */
+    C(0xc205, SLFI,    RIL_a, EI,  r1, i2_32u, new, r1_32, sub, subu32)
+    C(0xc204, SLGFI,   RIL_a, EI,  r1, i2_32u, r1, 0, sub, subu64)
+/* SUBTRACT LOGICAL WITH BORROW */
+    C(0xb999, SLBR,    RRE,   Z,   r1, r2, new, r1_32, subb, subb32)
+    C(0xb989, SLBGR,   RRE,   Z,   r1, r2, r1, 0, subb, subb64)
+    C(0xe399, SLB,     RXY_a, Z,   r1, m2_32u, new, r1_32, subb, subb32)
+    C(0xe389, SLBG,    RXY_a, Z,   r1, m2_64, r1, 0, subb, subb64)
+
+/* SUPERVISOR CALL */
+    C(0x0a00, SVC,     I,     Z,   0, 0, 0, 0, svc, 0)
+
+/* TEST DATA CLASS */
+    C(0xed10, TCEB,    RXE,   Z,   e1, a2, 0, 0, tceb, 0)
+    C(0xed11, TCDB,    RXE,   Z,   f1_o, a2, 0, 0, tcdb, 0)
+    C(0xed12, TCXB,    RXE,   Z,   x1_o, a2, 0, 0, tcxb, 0)
+
+/* TEST UNDER MASK */
+    C(0x9100, TM,      SI,    Z,   m1_8u, i2_8u, 0, 0, 0, tm32)
+    C(0xeb51, TMY,     SIY,   LD,  m1_8u, i2_8u, 0, 0, 0, tm32)
+    D(0xa702, TMHH,    RI_a,  Z,   r1_o, i2_16u_shl, 0, 0, 0, tm64, 48)
+    D(0xa703, TMHL,    RI_a,  Z,   r1_o, i2_16u_shl, 0, 0, 0, tm64, 32)
+    D(0xa700, TMLH,    RI_a,  Z,   r1_o, i2_16u_shl, 0, 0, 0, tm64, 16)
+    D(0xa701, TMLL,    RI_a,  Z,   r1_o, i2_16u_shl, 0, 0, 0, tm64, 0)
+
+/* TRANSLATE */
+    C(0xdc00, TR,      SS_a,  Z,   la1, a2, 0, 0, tr, 0)
+/* TRANSLATE AND TEST */
+    C(0xdd00, TRT,     SS_a,  Z,   la1, a2, 0, 0, trt, 0)
+/* TRANSLATE EXTENDED */
+    C(0xb2a5, TRE,     RRE,   Z,   0, r2, r1_P, 0, tre, 0)
+
+/* UNPACK */
+    /* Really format SS_b, but we pack both lengths into one argument
+       for the helper call, so we might as well leave one 8-bit field.  */
+    C(0xf300, UNPK,    SS_a,  Z,   la1, a2, 0, 0, unpk, 0)
+
+#ifndef CONFIG_USER_ONLY
+/* COMPARE AND SWAP AND PURGE */
+    C(0xb250, CSP,     RRE,   Z,   0, ra2, 0, 0, csp, 0)
+/* DIAGNOSE (KVM hypercall) */
+    C(0x8300, DIAG,    RSI,   Z,   0, 0, 0, 0, diag, 0)
+/* INSERT STORAGE KEY EXTENDED */
+    C(0xb229, ISKE,    RRE,   Z,   0, r2_o, new, r1_8, iske, 0)
+/* INVALIDATE PAGE TABLE ENTRY */
+    C(0xb221, IPTE,    RRF_a, Z,   r1_o, r2_o, 0, 0, ipte, 0)
+/* LOAD CONTROL */
+    C(0xb700, LCTL,    RS_a,  Z,   0, a2, 0, 0, lctl, 0)
+    C(0xeb2f, LCTLG,   RSY_a, Z,   0, a2, 0, 0, lctlg, 0)
+/* LOAD PSW */
+    C(0x8200, LPSW,    S,     Z,   0, a2, 0, 0, lpsw, 0)
+/* LOAD PSW EXTENDED */
+    C(0xb2b2, LPSWE,   S,     Z,   0, a2, 0, 0, lpswe, 0)
+/* LOAD REAL ADDRESS */
+    C(0xb100, LRA,     RX_a,  Z,   0, a2, r1, 0, lra, 0)
+    C(0xe313, LRAY,    RXY_a, LD,  0, a2, r1, 0, lra, 0)
+    C(0xe303, LRAG,    RXY_a, Z,   0, a2, r1, 0, lra, 0)
+/* LOAD USING REAL ADDRESS */
+    C(0xb24b, LURA,    RRE,   Z,   0, r2, new, r1_32, lura, 0)
+    C(0xb905, LURAG,   RRE,   Z,   0, r2, r1, 0, lurag, 0)
+/* MOVE TO PRIMARY */
+    C(0xda00, MVCP,    SS_d,  Z,   la1, a2, 0, 0, mvcp, 0)
+/* MOVE TO SECONDARY */
+    C(0xdb00, MVCS,    SS_d,  Z,   la1, a2, 0, 0, mvcs, 0)
+/* PURGE TLB */
+    C(0xb20d, PTLB,    S,     Z,   0, 0, 0, 0, ptlb, 0)
+/* RESET REFERENCE BIT EXTENDED */
+    C(0xb22a, RRBE,    RRE,   Z,   0, r2_o, 0, 0, rrbe, 0)
+/* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */
+    C(0xb220, SERVC,   RRE,   Z,   r1_o, r2_o, 0, 0, servc, 0)
+/* SET ADDRESS SPACE CONTROL FAST */
+    C(0xb279, SACF,    S,     Z,   0, a2, 0, 0, sacf, 0)
+/* SET CLOCK */
+    /* ??? Not implemented - is it necessary? */
+    C(0xb204, SCK,     S,     Z,   0, 0, 0, 0, 0, 0)
+/* SET CLOCK COMPARATOR */
+    C(0xb206, SCKC,    S,     Z,   0, m2_64, 0, 0, sckc, 0)
+/* SET CPU TIMER */
+    C(0xb208, SPT,     S,     Z,   0, m2_64, 0, 0, spt, 0)
+/* SET PREFIX */
+    C(0xb210, SPX,     S,     Z,   0, m2_32u, 0, 0, spx, 0)
+/* SET PSW KEY FROM ADDRESS */
+    C(0xb20a, SPKA,    S,     Z,   0, a2, 0, 0, spka, 0)
+/* SET STORAGE KEY EXTENDED */
+    C(0xb22b, SSKE,    RRF_c, Z,   r1_o, r2_o, 0, 0, sske, 0)
+/* SET SYSTEM MASK */
+    C(0x8000, SSM,     S,     Z,   0, m2_8u, 0, 0, ssm, 0)
+/* SIGNAL PROCESSOR */
+    C(0xae00, SIGP,    RS_a,  Z,   r3_o, a2, 0, 0, sigp, 0)
+/* STORE CLOCK */
+    C(0xb205, STCK,    S,     Z,   la2, 0, new, m1_64, stck, 0)
+    C(0xb27c, STCKF,   S,     SCF, la2, 0, new, m1_64, stck, 0)
+/* STORE CLOCK EXTENDED */
+    C(0xb278, STCKE,   S,     Z,   0, a2, 0, 0, stcke, 0)
+/* STORE CLOCK COMPARATOR */
+    C(0xb207, STCKC,   S,     Z,   la2, 0, new, m1_64, stckc, 0)
+/* STORE CONTROL */
+    C(0xb600, STCTL,   RS_a,  Z,   0, a2, 0, 0, stctl, 0)
+    C(0xeb25, STCTG,   RSY_a, Z,   0, a2, 0, 0, stctg, 0)
+/* STORE CPU ADDRESS */
+    C(0xb212, STAP,    S,     Z,   la2, 0, new, m1_16, stap, 0)
+/* STORE CPU ID */
+    C(0xb202, STIDP,   S,     Z,   la2, 0, new, m1_64, stidp, 0)
+/* STORE CPU TIMER */
+    C(0xb209, STPT,    S,     Z,   la2, 0, new, m1_64, stpt, 0)
+/* STORE FACILITY LIST */
+    C(0xb2b1, STFL,    S,     Z,   0, 0, 0, 0, stfl, 0)
+/* STORE PREFIX */
+    C(0xb211, STPX,    S,     Z,   la2, 0, new, m1_32, stpx, 0)
+/* STORE SYSTEM INFORMATION */
+    C(0xb27d, STSI,    S,     Z,   0, a2, 0, 0, stsi, 0)
+/* STORE THEN AND SYSTEM MASK */
+    C(0xac00, STNSM,   SI,    Z,   la1, 0, 0, 0, stnosm, 0)
+/* STORE THEN OR SYSTEM MASK */
+    C(0xad00, STOSM,   SI,    Z,   la1, 0, 0, 0, stnosm, 0)
+/* STORE USING REAL ADDRESS */
+    C(0xb246, STURA,   RRE,   Z,   r1_o, r2_o, 0, 0, stura, 0)
+    C(0xb925, STURG,   RRE,   Z,   r1_o, r2_o, 0, 0, sturg, 0)
+/* TEST PROTECTION */
+    C(0xe501, TPROT,   SSE,   Z,   la1, a2, 0, 0, tprot, 0)
+
+/* CCW I/O Instructions */
+    C(0xb276, XSCH,    S,     Z,   0, 0, 0, 0, xsch, 0)
+    C(0xb230, CSCH,    S,     Z,   0, 0, 0, 0, csch, 0)
+    C(0xb231, HSCH,    S,     Z,   0, 0, 0, 0, hsch, 0)
+    C(0xb232, MSCH,    S,     Z,   0, insn, 0, 0, msch, 0)
+    C(0xb23b, RCHP,    S,     Z,   0, 0, 0, 0, rchp, 0)
+    C(0xb238, RSCH,    S,     Z,   0, 0, 0, 0, rsch, 0)
+    C(0xb233, SSCH,    S,     Z,   0, insn, 0, 0, ssch, 0)
+    C(0xb234, STSCH,   S,     Z,   0, insn, 0, 0, stsch, 0)
+    C(0xb235, TSCH,    S,     Z,   0, insn, 0, 0, tsch, 0)
+    /* ??? Not listed in PoO ninth edition, but there's a linux driver that
+       uses it: "A CHSC subchannel is usually present on LPAR only."  */
+    C(0xb25f, CHSC,    RRE,   Z,   0, insn, 0, 0, chsc, 0)
+#endif /* CONFIG_USER_ONLY */
diff --git a/qemu/target-s390x/insn-format.def b/qemu/target-s390x/insn-format.def
new file mode 100644
index 000000000..0e898b90b
--- /dev/null
+++ b/qemu/target-s390x/insn-format.def
@@ -0,0 +1,55 @@
+/* Description of s390 insn formats.  */
+/* NAME   F1,          F2... */
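+/* A rough guide to the field macros (the exact decoding lives in
+   translate.c): R(N,B) is the 4-bit register field number N at bit
+   offset B; I(N,B,S) an S-bit immediate at bit B; M(N,B) a 4-bit mask;
+   L(N,B,S) an S-bit length field; BD/BXD are base(+index)+displacement
+   operands, the ...L variants taking the 20-bit "long" displacement.  */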
+F0(E)
+F1(I,     I(1, 8, 8))
+F2(RI_a,  R(1, 8),     I(2,16,16))
+F2(RI_b,  R(1, 8),     I(2,16,16))
+F2(RI_c,  M(1, 8),     I(2,16,16))
+F3(RIE_a, R(1, 8),     I(2,16,16),  M(3,32))
+F4(RIE_b, R(1, 8),     R(2,12),     M(3,32),   I(4,16,16))
+F4(RIE_c, R(1, 8),     I(2,32, 8),  M(3,12),   I(4,16,16))
+F3(RIE_d, R(1, 8),     I(2,16,16),  R(3,12))
+F3(RIE_e, R(1, 8),     I(2,16,16),  R(3,12))
+F5(RIE_f, R(1, 8),     R(2,12),     I(3,16,8), I(4,24,8),  I(5,32,8))
+F2(RIL_a, R(1, 8),     I(2,16,32))
+F2(RIL_b, R(1, 8),     I(2,16,32))
+F2(RIL_c, M(1, 8),     I(2,16,32))
+F4(RIS,   R(1, 8),     I(2,32, 8),  M(3,12),   BD(4,16,20))
+/* ??? The PoO does not call out subtypes _a and _b for RR, as it does
+   for e.g. RX.  Our checking requires this for e.g. BCR.  */
+F2(RR_a,  R(1, 8),     R(2,12))
+F2(RR_b,  M(1, 8),     R(2,12))
+F2(RRE,   R(1,24),     R(2,28))
+F3(RRD,   R(1,16),     R(2,28),     R(3,24))
+F4(RRF_a, R(1,24),     R(2,28),     R(3,16),   M(4,20))
+F4(RRF_b, R(1,24),     R(2,28),     R(3,16),   M(4,20))
+F4(RRF_c, R(1,24),     R(2,28),     M(3,16),   M(4,20))
+F4(RRF_d, R(1,24),     R(2,28),     M(3,16),   M(4,20))
+F4(RRF_e, R(1,24),     R(2,28),     M(3,16),   M(4,20))
+F4(RRS,   R(1, 8),     R(2,12),     M(3,32),   BD(4,16,20))
+F3(RS_a,  R(1, 8),     BD(2,16,20), R(3,12))
+F3(RS_b,  R(1, 8),     BD(2,16,20), M(3,12))
+F3(RSI,   R(1, 8),     I(2,16,16),  R(3,12))
+F2(RSL,   L(1, 8, 4),  BD(1,16,20))
+F3(RSY_a, R(1, 8),     BDL(2),      R(3,12))
+F3(RSY_b, R(1, 8),     BDL(2),      M(3,12))
+F2(RX_a,  R(1, 8),     BXD(2))
+F2(RX_b,  M(1, 8),     BXD(2))
+F2(RXE,   R(1, 8),     BXD(2))
+F3(RXF,   R(1,32),     BXD(2),      R(3, 8))
+F2(RXY_a, R(1, 8),     BXDL(2))
+F2(RXY_b, M(1, 8),     BXDL(2))
+F1(S,     BD(2,16,20))
+F2(SI,    BD(1,16,20), I(2,8,8))
+F2(SIL,   BD(1,16,20), I(2,32,16))
+F2(SIY,   BDL(1),      I(2, 8, 8))
+F3(SS_a,  L(1, 8, 8),  BD(1,16,20), BD(2,32,36))
+F4(SS_b,  L(1, 8, 4),  BD(1,16,20), L(2,12,4),   BD(2,32,36))
+F4(SS_c,  L(1, 8, 4),  BD(1,16,20), BD(2,32,36), I(3,12, 4))
+/* ??? Odd man out.  The L1 field here is really a register, but the
+   easy way to compress the fields has R1 and B1 overlap.  */
+F4(SS_d,  L(1, 8, 4),  BD(1,16,20), BD(2,32,36), R(3,12))
+F4(SS_e,  R(1, 8),     BD(2,16,20), R(3,12),     BD(4,32,36))
+F3(SS_f,  BD(1,16,20), L(2,8,8),    BD(2,32,36))
+F2(SSE,   BD(1,16,20), BD(2,32,36))
+F3(SSF,   BD(1,16,20), BD(2,32,36), R(3,8))
diff --git a/qemu/target-s390x/int_helper.c b/qemu/target-s390x/int_helper.c
new file mode 100644
index 000000000..a46c736d6
--- /dev/null
+++ b/qemu/target-s390x/int_helper.c
@@ -0,0 +1,154 @@
+/*
+ *  S/390 integer helper routines
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *  Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* 64/32 -> 32 signed division */
+int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
+{
+    int32_t ret, b = b64;
+    int64_t q;
+
+    if (b == 0) {
+        runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+    }
+
+    ret = q = a / b;
+    env->retxl = a % b;
+
+    /* Catch non-representable quotient.  */
+    if (ret != q) {
+        runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+    }
+
+    return ret;
+}
+
+/* 64/32 -> 32 unsigned division */
+uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
+{
+    uint32_t ret, b = b64;
+    uint64_t q;
+
+    if (b == 0) {
+        runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+    }
+
+    ret = q = a / b;
+    env->retxl = a % b;
+
+    /* Catch non-representable quotient.  */
+    if (ret != q) {
+        runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+    }
+
+    return ret;
+}
+
+/* 64/64 -> 64 signed division */
+int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b)
+{
+    /* Catch divide by zero, and non-representable quotient (INT64_MIN / -1).  */
+    if (b == 0 || (b == -1 && a == INT64_MIN)) {
+        runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+    }
+    env->retxl = a % b;
+    return a / b;
+}
+
+/* 128 -> 64/64 unsigned division */
+uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                        uint64_t b)
+{
+    uint64_t ret;
+    /* Signal divide by zero.  */
+    if (b == 0) {
+        runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+    }
+    if (ah == 0) {
+        /* 64 -> 64/64 case */
+        env->retxl = al % b;
+        ret = al / b;
+    } else {
+        /* ??? Move i386 idivq helper to host-utils.  */
+#ifdef CONFIG_INT128
+        __uint128_t a = ((__uint128_t)ah << 64) | al;
+        __uint128_t q = a / b;
+        env->retxl = a % b;
+        ret = q;
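+        /* Catch non-representable quotient.  */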
+        if (ret != q) {
+            runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+        }
+#else
+        S390CPU *cpu = s390_env_get_cpu(env);
+        /* 32-bit hosts would need special wrapper functionality - just abort if
+           we encounter such a case; it's very unlikely anyway. */
+        cpu_abort(CPU(cpu), "128 -> 64/64 division not implemented\n");
+#endif
+    }
+    return ret;
+}
+
+/* count leading zeros, used by FIND LEFTMOST ONE (FLOGR) */
+uint64_t HELPER(clz)(uint64_t v)
+{
+    return clz64(v);
+}
+
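+/* CONVERT TO DECIMAL: pack |reg| into 4-bit BCD digits with the sign
+   code in the low nibble, e.g. -123 becomes 0x...123d.  */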
+uint64_t HELPER(cvd)(int32_t reg)
+{
+    /* start from packed decimal +0 (sign code 0xc in the low nibble) */
+    uint64_t dec = 0x0c;
+    int64_t bin = reg;
+    int shift;
+
+    if (bin < 0) {
+        bin = -bin;
+        dec = 0x0d;
+    }
+
+    for (shift = 4; (shift < 64) && bin; shift += 4) {
+        dec |= (bin % 10) << shift;
+        bin /= 10;
+    }
+
+    return dec;
+}
+
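+/* POPULATION COUNT: each byte of the result holds the number of one
+   bits in the corresponding byte of r2.  */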
+uint64_t HELPER(popcnt)(uint64_t r2)
+{
+    uint64_t ret = 0;
+    int i;
+
+    for (i = 0; i < 64; i += 8) {
+        uint64_t t = ctpop32((r2 >> i) & 0xff);
+        ret |= t << i;
+    }
+    return ret;
+}
diff --git a/qemu/target-s390x/interrupt.c b/qemu/target-s390x/interrupt.c
new file mode 100644
index 000000000..1404d0afd
--- /dev/null
+++ b/qemu/target-s390x/interrupt.c
@@ -0,0 +1,66 @@
+/*
+ * QEMU S/390 Interrupt support
+ *
+ * Copyright IBM Corp. 2012, 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version.  See the COPYING file in the top-level directory.
+ */
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+
+/*
+ * All of the following interrupts are floating, i.e. not per-vcpu.
+ * We just need a dummy cpustate in order to be able to inject them in the
+ * non-kvm case.
+ */
+#if !defined(CONFIG_USER_ONLY)
+void s390_sclp_extint(uint32_t parm)
+{
+    if (kvm_enabled()) {
+        kvm_s390_service_interrupt(parm);
+    } else {
+        S390CPU *dummy_cpu = s390_cpu_addr2state(0);
+
+        cpu_inject_ext(dummy_cpu, EXT_SERVICE, parm, 0);
+    }
+}
+
+void s390_virtio_irq(int config_change, uint64_t token)
+{
+    if (kvm_enabled()) {
+        kvm_s390_virtio_irq(config_change, token);
+    } else {
+        S390CPU *dummy_cpu = s390_cpu_addr2state(0);
+
+        cpu_inject_ext(dummy_cpu, EXT_VIRTIO, config_change, token);
+    }
+}
+
+void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
+                       uint32_t io_int_parm, uint32_t io_int_word)
+{
+    if (kvm_enabled()) {
+        kvm_s390_io_interrupt(subchannel_id, subchannel_nr, io_int_parm,
+                              io_int_word);
+    } else {
+        S390CPU *dummy_cpu = s390_cpu_addr2state(0);
+
+        cpu_inject_io(dummy_cpu, subchannel_id, subchannel_nr, io_int_parm,
+                      io_int_word);
+    }
+}
+
+void s390_crw_mchk(void)
+{
+    if (kvm_enabled()) {
+        kvm_s390_crw_mchk();
+    } else {
+        S390CPU *dummy_cpu = s390_cpu_addr2state(0);
+
+        cpu_inject_crw_mchk(dummy_cpu);
+    }
+}
+
+#endif
diff --git a/qemu/target-s390x/ioinst.c b/qemu/target-s390x/ioinst.c
new file mode 100644
index 000000000..77f2a1fb9
--- /dev/null
+++ b/qemu/target-s390x/ioinst.c
@@ -0,0 +1,831 @@
+/*
+ * I/O instructions for S/390
+ *
+ * Copyright 2012, 2015 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include <sys/types.h>
+
+#include "cpu.h"
+#include "ioinst.h"
+#include "trace.h"
+#include "hw/s390x/s390-pci-bus.h"
+
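+/* Split a subsystem-identification word into its parts: the "one" bit
+   must be set, the m bit selects full cssid addressing, and the
+   cssid/ssid/schid fields are extracted with the IOINST_SCHID_* masks
+   from ioinst.h.  */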
+int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
+                                 int *schid)
+{
+    if (!IOINST_SCHID_ONE(value)) {
+        return -EINVAL;
+    }
+    if (!IOINST_SCHID_M(value)) {
+        if (IOINST_SCHID_CSSID(value)) {
+            return -EINVAL;
+        }
+        *cssid = 0;
+        *m = 0;
+    } else {
+        *cssid = IOINST_SCHID_CSSID(value);
+        *m = 1;
+    }
+    *ssid = IOINST_SCHID_SSID(value);
+    *schid = IOINST_SCHID_NR(value);
+    return 0;
+}
+
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
+{
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    int ret = -ENODEV;
+    int cc;
+
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+        program_interrupt(&cpu->env, PGM_OPERAND, 2);
+        return;
+    }
+    trace_ioinst_sch_id("xsch", cssid, ssid, schid);
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch && css_subch_visible(sch)) {
+        ret = css_do_xsch(sch);
+    }
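+    /* Map the result to a condition code: success -> cc 0, -EBUSY -> cc 2,
+       -ENODEV -> cc 3, anything else -> cc 1.  */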
+    switch (ret) {
+    case -ENODEV:
+        cc = 3;
+        break;
+    case -EBUSY:
+        cc = 2;
+        break;
+    case 0:
+        cc = 0;
+        break;
+    default:
+        cc = 1;
+        break;
+    }
+    setcc(cpu, cc);
+}
+
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
+{
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    int ret = -ENODEV;
+    int cc;
+
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+        program_interrupt(&cpu->env, PGM_OPERAND, 2);
+        return;
+    }
+    trace_ioinst_sch_id("csch", cssid, ssid, schid);
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch && css_subch_visible(sch)) {
+        ret = css_do_csch(sch);
+    }
+    if (ret == -ENODEV) {
+        cc = 3;
+    } else {
+        cc = 0;
+    }
+    setcc(cpu, cc);
+}
+
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)
+{
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    int ret = -ENODEV;
+    int cc;
+
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+        program_interrupt(&cpu->env, PGM_OPERAND, 2);
+        return;
+    }
+    trace_ioinst_sch_id("hsch", cssid, ssid, schid);
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch && css_subch_visible(sch)) {
+        ret = css_do_hsch(sch);
+    }
+    switch (ret) {
+    case -ENODEV:
+        cc = 3;
+        break;
+    case -EBUSY:
+        cc = 2;
+        break;
+    case 0:
+        cc = 0;
+        break;
+    default:
+        cc = 1;
+        break;
+    }
+    setcc(cpu, cc);
+}
+
+static int ioinst_schib_valid(SCHIB *schib)
+{
+    if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) ||
+        (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_INVALID)) {
+        return 0;
+    }
+    /* Disallow extended measurements for now. */
+    if (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_XMWME) {
+        return 0;
+    }
+    return 1;
+}
+
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    SCHIB schib;
+    uint64_t addr;
+    int ret = -ENODEV;
+    int cc;
+    CPUS390XState *env = &cpu->env;
+    uint8_t ar;
+
+    addr = decode_basedisp_s(env, ipb, &ar);
+    if (addr & 3) {
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        return;
+    }
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
+        return;
+    }
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
+        !ioinst_schib_valid(&schib)) {
+        program_interrupt(env, PGM_OPERAND, 2);
+        return;
+    }
+    trace_ioinst_sch_id("msch", cssid, ssid, schid);
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch && css_subch_visible(sch)) {
+        ret = css_do_msch(sch, &schib);
+    }
+    switch (ret) {
+    case -ENODEV:
+        cc = 3;
+        break;
+    case -EBUSY:
+        cc = 2;
+        break;
+    case 0:
+        cc = 0;
+        break;
+    default:
+        cc = 1;
+        break;
+    }
+    setcc(cpu, cc);
+}
+
+static void copy_orb_from_guest(ORB *dest, const ORB *src)
+{
+    dest->intparm = be32_to_cpu(src->intparm);
+    dest->ctrl0 = be16_to_cpu(src->ctrl0);
+    dest->lpm = src->lpm;
+    dest->ctrl1 = src->ctrl1;
+    dest->cpa = be32_to_cpu(src->cpa);
+}
+
+static int ioinst_orb_valid(ORB *orb)
+{
+    if ((orb->ctrl0 & ORB_CTRL0_MASK_INVALID) ||
+        (orb->ctrl1 & ORB_CTRL1_MASK_INVALID)) {
+        return 0;
+    }
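+    /* The channel-program address must be a 31-bit address.  */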
+    if ((orb->cpa & HIGH_ORDER_BIT) != 0) {
+        return 0;
+    }
+    return 1;
+}
+
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    ORB orig_orb, orb;
+    uint64_t addr;
+    int ret = -ENODEV;
+    int cc;
+    CPUS390XState *env = &cpu->env;
+    uint8_t ar;
+
+    addr = decode_basedisp_s(env, ipb, &ar);
+    if (addr & 3) {
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        return;
+    }
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
+        return;
+    }
+    copy_orb_from_guest(&orb, &orig_orb);
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
+        !ioinst_orb_valid(&orb)) {
+        program_interrupt(env, PGM_OPERAND, 2);
+        return;
+    }
+    trace_ioinst_sch_id("ssch", cssid, ssid, schid);
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch && css_subch_visible(sch)) {
+        ret = css_do_ssch(sch, &orb);
+    }
+    switch (ret) {
+    case -ENODEV:
+        cc = 3;
+        break;
+    case -EBUSY:
+        cc = 2;
+        break;
+    case 0:
+        cc = 0;
+        break;
+    default:
+        cc = 1;
+        break;
+    }
+    setcc(cpu, cc);
+}
+
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
+{
+    CRW crw;
+    uint64_t addr;
+    int cc;
+    CPUS390XState *env = &cpu->env;
+    uint8_t ar;
+
+    addr = decode_basedisp_s(env, ipb, &ar);
+    if (addr & 3) {
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        return;
+    }
+
+    cc = css_do_stcrw(&crw);
+    /* 0 - crw stored, 1 - zeroes stored */
+
+    if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
+        setcc(cpu, cc);
+    } else if (cc == 0) {
+        /* Write failed: requeue CRW since STCRW is a suppressing instruction */
+        css_undo_stcrw(&crw);
+    }
+}
+
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    uint64_t addr;
+    int cc;
+    SCHIB schib;
+    CPUS390XState *env = &cpu->env;
+    uint8_t ar;
+
+    addr = decode_basedisp_s(env, ipb, &ar);
+    if (addr & 3) {
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        return;
+    }
+
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+        /*
+         * As operand exceptions have a lower priority than access exceptions,
+         * we check whether the memory area is writeable (injecting the
+         * access exception if it is not) first.
+         */
+        if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
+            program_interrupt(env, PGM_OPERAND, 2);
+        }
+        return;
+    }
+    trace_ioinst_sch_id("stsch", cssid, ssid, schid);
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch) {
+        if (css_subch_visible(sch)) {
+            css_do_stsch(sch, &schib);
+            cc = 0;
+        } else {
+            /* Indicate no more subchannels in this css/ss */
+            cc = 3;
+        }
+    } else {
+        if (css_schid_final(m, cssid, ssid, schid)) {
+            cc = 3; /* No more subchannels in this css/ss */
+        } else {
+            /* Store an empty schib. */
+            memset(&schib, 0, sizeof(schib));
+            cc = 0;
+        }
+    }
+    if (cc != 3) {
+        if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
+                                    sizeof(schib)) != 0) {
+            return;
+        }
+    } else {
+        /* Access exceptions have a higher priority than cc3 */
+        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
+            return;
+        }
+    }
+    setcc(cpu, cc);
+}
+
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+    CPUS390XState *env = &cpu->env;
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    IRB irb;
+    uint64_t addr;
+    int cc, irb_len;
+    uint8_t ar;
+
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+        program_interrupt(env, PGM_OPERAND, 2);
+        return -EIO;
+    }
+    trace_ioinst_sch_id("tsch", cssid, ssid, schid);
+    addr = decode_basedisp_s(env, ipb, &ar);
+    if (addr & 3) {
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        return -EIO;
+    }
+
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch && css_subch_visible(sch)) {
+        cc = css_do_tsch_get_irb(sch, &irb, &irb_len);
+    } else {
+        cc = 3;
+    }
+    /* 0 - status pending, 1 - not status pending, 3 - not operational */
+    if (cc != 3) {
+        if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
+            return -EFAULT;
+        }
+        css_do_tsch_update_subch(sch);
+    } else {
+        irb_len = sizeof(irb) - sizeof(irb.emw);
+        /* Access exceptions have a higher priority than cc3 */
+        if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
+            return -EFAULT;
+        }
+    }
+
+    setcc(cpu, cc);
+    return 0;
+}
+
+typedef struct ChscReq {
+    uint16_t len;
+    uint16_t command;
+    uint32_t param0;
+    uint32_t param1;
+    uint32_t param2;
+} QEMU_PACKED ChscReq;
+
+typedef struct ChscResp {
+    uint16_t len;
+    uint16_t code;
+    uint32_t param;
+    char data[0];
+} QEMU_PACKED ChscResp;
+
+#define CHSC_MIN_RESP_LEN 0x0008
+
+#define CHSC_SCPD 0x0002
+#define CHSC_SCSC 0x0010
+#define CHSC_SDA  0x0031
+#define CHSC_SEI  0x000e
+
+#define CHSC_SCPD_0_M 0x20000000
+#define CHSC_SCPD_0_C 0x10000000
+#define CHSC_SCPD_0_FMT 0x0f000000
+#define CHSC_SCPD_0_CSSID 0x00ff0000
+#define CHSC_SCPD_0_RFMT 0x00000f00
+#define CHSC_SCPD_0_RES 0xc000f000
+#define CHSC_SCPD_1_RES 0xffffff00
+#define CHSC_SCPD_01_CHPID 0x000000ff
+static void ioinst_handle_chsc_scpd(ChscReq *req, ChscResp *res)
+{
+    uint16_t len = be16_to_cpu(req->len);
+    uint32_t param0 = be32_to_cpu(req->param0);
+    uint32_t param1 = be32_to_cpu(req->param1);
+    uint16_t resp_code;
+    int rfmt;
+    uint16_t cssid;
+    uint8_t f_chpid, l_chpid;
+    int desc_size;
+    int m;
+
+    rfmt = (param0 & CHSC_SCPD_0_RFMT) >> 8;
+    if ((rfmt == 0) || (rfmt == 1)) {
+        rfmt = !!(param0 & CHSC_SCPD_0_C);
+    }
+    if ((len != 0x0010) || (param0 & CHSC_SCPD_0_RES) ||
+        (param1 & CHSC_SCPD_1_RES) || req->param2) {
+        resp_code = 0x0003;
+        goto out_err;
+    }
+    if (param0 & CHSC_SCPD_0_FMT) {
+        resp_code = 0x0007;
+        goto out_err;
+    }
+    cssid = (param0 & CHSC_SCPD_0_CSSID) >> 16;
+    m = param0 & CHSC_SCPD_0_M;
+    if (cssid != 0) {
+        if (!m || !css_present(cssid)) {
+            resp_code = 0x0008;
+            goto out_err;
+        }
+    }
+    f_chpid = param0 & CHSC_SCPD_01_CHPID;
+    l_chpid = param1 & CHSC_SCPD_01_CHPID;
+    if (l_chpid < f_chpid) {
+        resp_code = 0x0003;
+        goto out_err;
+    }
+    /* css_collect_chp_desc() is endian-aware */
+    desc_size = css_collect_chp_desc(m, cssid, f_chpid, l_chpid, rfmt,
+                                     &res->data);
+    res->code = cpu_to_be16(0x0001);
+    res->len = cpu_to_be16(8 + desc_size);
+    res->param = cpu_to_be32(rfmt);
+    return;
+
+  out_err:
+    res->code = cpu_to_be16(resp_code);
+    res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+    res->param = cpu_to_be32(rfmt);
+}
+
+#define CHSC_SCSC_0_M 0x20000000
+#define CHSC_SCSC_0_FMT 0x000f0000
+#define CHSC_SCSC_0_CSSID 0x0000ff00
+#define CHSC_SCSC_0_RES 0xdff000ff
+static void ioinst_handle_chsc_scsc(ChscReq *req, ChscResp *res)
+{
+    uint16_t len = be16_to_cpu(req->len);
+    uint32_t param0 = be32_to_cpu(req->param0);
+    uint8_t cssid;
+    uint16_t resp_code;
+    uint32_t general_chars[510];
+    uint32_t chsc_chars[508];
+
+    if (len != 0x0010) {
+        resp_code = 0x0003;
+        goto out_err;
+    }
+
+    if (param0 & CHSC_SCSC_0_FMT) {
+        resp_code = 0x0007;
+        goto out_err;
+    }
+    cssid = (param0 & CHSC_SCSC_0_CSSID) >> 8;
+    if (cssid != 0) {
+        if (!(param0 & CHSC_SCSC_0_M) || !css_present(cssid)) {
+            resp_code = 0x0008;
+            goto out_err;
+        }
+    }
+    if ((param0 & CHSC_SCSC_0_RES) || req->param1 || req->param2) {
+        resp_code = 0x0003;
+        goto out_err;
+    }
+    res->code = cpu_to_be16(0x0001);
+    res->len = cpu_to_be16(4080);
+    res->param = 0;
+
+    memset(general_chars, 0, sizeof(general_chars));
+    memset(chsc_chars, 0, sizeof(chsc_chars));
+
+    general_chars[0] = cpu_to_be32(0x03000000);
+    general_chars[1] = cpu_to_be32(0x00059000);
+
+    chsc_chars[0] = cpu_to_be32(0x40000000);
+    chsc_chars[3] = cpu_to_be32(0x00040000);
+
+    memcpy(res->data, general_chars, sizeof(general_chars));
+    memcpy(res->data + sizeof(general_chars), chsc_chars, sizeof(chsc_chars));
+    return;
+
+  out_err:
+    res->code = cpu_to_be16(resp_code);
+    res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+    res->param = 0;
+}
+
+#define CHSC_SDA_0_FMT 0x0f000000
+#define CHSC_SDA_0_OC 0x0000ffff
+#define CHSC_SDA_0_RES 0xf0ff0000
+#define CHSC_SDA_OC_MCSSE 0x0
+#define CHSC_SDA_OC_MSS 0x2
+static void ioinst_handle_chsc_sda(ChscReq *req, ChscResp *res)
+{
+    uint16_t resp_code = 0x0001;
+    uint16_t len = be16_to_cpu(req->len);
+    uint32_t param0 = be32_to_cpu(req->param0);
+    uint16_t oc;
+    int ret;
+
+    if ((len != 0x0400) || (param0 & CHSC_SDA_0_RES)) {
+        resp_code = 0x0003;
+        goto out;
+    }
+
+    if (param0 & CHSC_SDA_0_FMT) {
+        resp_code = 0x0007;
+        goto out;
+    }
+
+    oc = param0 & CHSC_SDA_0_OC;
+    switch (oc) {
+    case CHSC_SDA_OC_MCSSE:
+        ret = css_enable_mcsse();
+        if (ret == -EINVAL) {
+            resp_code = 0x0101;
+            goto out;
+        }
+        break;
+    case CHSC_SDA_OC_MSS:
+        ret = css_enable_mss();
+        if (ret == -EINVAL) {
+            resp_code = 0x0101;
+            goto out;
+        }
+        break;
+    default:
+        resp_code = 0x0003;
+        goto out;
+    }
+
+out:
+    res->code = cpu_to_be16(resp_code);
+    res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+    res->param = 0;
+}
+
+static int chsc_sei_nt0_get_event(void *res)
+{
+    /* no events yet */
+    return 1;
+}
+
+static int chsc_sei_nt0_have_event(void)
+{
+    /* no events yet */
+    return 0;
+}
+
+#define CHSC_SEI_NT0    (1ULL << 63)
+#define CHSC_SEI_NT2    (1ULL << 61)
+static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res)
+{
+    uint64_t selection_mask = ldq_p(&req->param1);
+    uint8_t *res_flags = (uint8_t *)res->data;
+    int have_event = 0;
+    int have_more = 0;
+
+    /* According to the architecture, NT0 cannot be masked. */
+    have_event = !chsc_sei_nt0_get_event(res);
+    have_more = chsc_sei_nt0_have_event();
+
+    if (selection_mask & CHSC_SEI_NT2) {
+        if (!have_event) {
+            have_event = !chsc_sei_nt2_get_event(res);
+        }
+
+        if (!have_more) {
+            have_more = chsc_sei_nt2_have_event();
+        }
+    }
+
+    if (have_event) {
+        res->code = cpu_to_be16(0x0001);
+        if (have_more) {
+            (*res_flags) |= 0x80;
+        } else {
+            (*res_flags) &= ~0x80;
+        }
+    } else {
+        res->code = cpu_to_be16(0x0004);
+    }
+}
+
+static void ioinst_handle_chsc_unimplemented(ChscResp *res)
+{
+    res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+    res->code = cpu_to_be16(0x0004);
+    res->param = 0;
+}
+
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
+{
+    ChscReq *req;
+    ChscResp *res;
+    uint64_t addr;
+    int reg;
+    uint16_t len;
+    uint16_t command;
+    CPUS390XState *env = &cpu->env;
+    uint8_t buf[TARGET_PAGE_SIZE];
+
+    trace_ioinst("chsc");
+    reg = (ipb >> 20) & 0x00f;
+    addr = env->regs[reg];
+    /* Page boundary? */
+    if (addr & 0xfff) {
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        return;
+    }
+    /*
+     * Reading sizeof(ChscReq) bytes is currently enough for all of our
+     * present CHSC sub-handlers ... if we ever need more, we should take
+     * care of req->len here first.
+     */
+    if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
+        return;
+    }
+    req = (ChscReq *)buf;
+    len = be16_to_cpu(req->len);
+    /* Length field valid? */
+    if ((len < 16) || (len > 4088) || (len & 7)) {
+        program_interrupt(env, PGM_OPERAND, 2);
+        return;
+    }
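+    /* The response block is built directly after the request in the same
+       page; zero the remainder of the page first.  */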
+    memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
+    res = (void *)((char *)req + len);
+    command = be16_to_cpu(req->command);
+    trace_ioinst_chsc_cmd(command, len);
+    switch (command) {
+    case CHSC_SCSC:
+        ioinst_handle_chsc_scsc(req, res);
+        break;
+    case CHSC_SCPD:
+        ioinst_handle_chsc_scpd(req, res);
+        break;
+    case CHSC_SDA:
+        ioinst_handle_chsc_sda(req, res);
+        break;
+    case CHSC_SEI:
+        ioinst_handle_chsc_sei(req, res);
+        break;
+    default:
+        ioinst_handle_chsc_unimplemented(res);
+        break;
+    }
+
+    if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
+                                 be16_to_cpu(res->len))) {
+        setcc(cpu, 0);    /* Command execution complete */
+    }
+}
+
+int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
+{
+    CPUS390XState *env = &cpu->env;
+    uint64_t addr;
+    int lowcore;
+    IOIntCode int_code;
+    hwaddr len;
+    int ret;
+    uint8_t ar;
+
+    trace_ioinst("tpi");
+    addr = decode_basedisp_s(env, ipb, &ar);
+    if (addr & 3) {
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+        return -EIO;
+    }
+
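+    /* An operand address of zero designates the lowcore: the interruption
+       code is then stored as two words at absolute address 184.  */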
+    lowcore = addr ? 0 : 1;
+    len = lowcore ? 8 /* two words */ : 12 /* three words */;
+    ret = css_do_tpi(&int_code, lowcore);
+    if (ret == 1) {
+        s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, ar, &int_code, len);
+    }
+    return ret;
+}
+
+#define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc)
+#define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28)
+#define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1)
+#define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001)
+
+void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
+                        uint32_t ipb)
+{
+    uint8_t mbk;
+    int update;
+    int dct;
+    CPUS390XState *env = &cpu->env;
+
+    trace_ioinst("schm");
+
+    if (SCHM_REG1_RES(reg1)) {
+        program_interrupt(env, PGM_OPERAND, 2);
+        return;
+    }
+
+    mbk = SCHM_REG1_MBK(reg1);
+    update = SCHM_REG1_UPD(reg1);
+    dct = SCHM_REG1_DCT(reg1);
+
+    if (update && (reg2 & 0x000000000000001f)) {
+        program_interrupt(env, PGM_OPERAND, 2);
+        return;
+    }
+
+    css_do_schm(mbk, update, dct, update ? reg2 : 0);
+}
+
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
+{
+    int cssid, ssid, schid, m;
+    SubchDev *sch;
+    int ret = -ENODEV;
+    int cc;
+
+    if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+        program_interrupt(&cpu->env, PGM_OPERAND, 2);
+        return;
+    }
+    trace_ioinst_sch_id("rsch", cssid, ssid, schid);
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (sch && css_subch_visible(sch)) {
+        ret = css_do_rsch(sch);
+    }
+    switch (ret) {
+    case -ENODEV:
+        cc = 3;
+        break;
+    case -EINVAL:
+        cc = 2;
+        break;
+    case 0:
+        cc = 0;
+        break;
+    default:
+        cc = 1;
+        break;
+    }
+    setcc(cpu, cc);
+}
+
+#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00)
+#define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16)
+#define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff)
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
+{
+    int cc;
+    uint8_t cssid;
+    uint8_t chpid;
+    int ret;
+    CPUS390XState *env = &cpu->env;
+
+    if (RCHP_REG1_RES(reg1)) {
+        program_interrupt(env, PGM_OPERAND, 2);
+        return;
+    }
+
+    cssid = RCHP_REG1_CSSID(reg1);
+    chpid = RCHP_REG1_CHPID(reg1);
+
+    trace_ioinst_chp_id("rchp", cssid, chpid);
+
+    ret = css_do_rchp(cssid, chpid);
+
+    switch (ret) {
+    case -ENODEV:
+        cc = 3;
+        break;
+    case -EBUSY:
+        cc = 2;
+        break;
+    case 0:
+        cc = 0;
+        break;
+    default:
+        /* Invalid channel subsystem. */
+        program_interrupt(env, PGM_OPERAND, 2);
+        return;
+    }
+    setcc(cpu, cc);
+}
+
+#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000)
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1)
+{
+    /* We do not provide address limit checking, so let's suppress it. */
+    if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
+        program_interrupt(&cpu->env, PGM_OPERAND, 2);
+    }
+}
diff --git a/qemu/target-s390x/ioinst.h b/qemu/target-s390x/ioinst.h
new file mode 100644
index 000000000..013cc9148
--- /dev/null
+++ b/qemu/target-s390x/ioinst.h
@@ -0,0 +1,246 @@
+/*
+ * S/390 channel I/O instructions
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef IOINST_S390X_H
+#define IOINST_S390X_H
+/*
+ * Channel I/O related definitions, as defined in the Principles
+ * Of Operation (and taken from the Linux implementation).
+ */
+
+/* subchannel status word (command mode only) */
+typedef struct SCSW {
+    uint16_t flags;
+    uint16_t ctrl;
+    uint32_t cpa;
+    uint8_t dstat;
+    uint8_t cstat;
+    uint16_t count;
+} QEMU_PACKED SCSW;
+
+#define SCSW_FLAGS_MASK_KEY 0xf000
+#define SCSW_FLAGS_MASK_SCTL 0x0800
+#define SCSW_FLAGS_MASK_ESWF 0x0400
+#define SCSW_FLAGS_MASK_CC 0x0300
+#define SCSW_FLAGS_MASK_FMT 0x0080
+#define SCSW_FLAGS_MASK_PFCH 0x0040
+#define SCSW_FLAGS_MASK_ISIC 0x0020
+#define SCSW_FLAGS_MASK_ALCC 0x0010
+#define SCSW_FLAGS_MASK_SSI 0x0008
+#define SCSW_FLAGS_MASK_ZCC 0x0004
+#define SCSW_FLAGS_MASK_ECTL 0x0002
+#define SCSW_FLAGS_MASK_PNO 0x0001
+
+#define SCSW_CTRL_MASK_FCTL 0x7000
+#define SCSW_CTRL_MASK_ACTL 0x0fe0
+#define SCSW_CTRL_MASK_STCTL 0x001f
+
+#define SCSW_FCTL_CLEAR_FUNC 0x1000
+#define SCSW_FCTL_HALT_FUNC 0x2000
+#define SCSW_FCTL_START_FUNC 0x4000
+
+#define SCSW_ACTL_SUSP 0x0020
+#define SCSW_ACTL_DEVICE_ACTIVE 0x0040
+#define SCSW_ACTL_SUBCH_ACTIVE 0x0080
+#define SCSW_ACTL_CLEAR_PEND 0x0100
+#define SCSW_ACTL_HALT_PEND  0x0200
+#define SCSW_ACTL_START_PEND 0x0400
+#define SCSW_ACTL_RESUME_PEND 0x0800
+
+#define SCSW_STCTL_STATUS_PEND 0x0001
+#define SCSW_STCTL_SECONDARY 0x0002
+#define SCSW_STCTL_PRIMARY 0x0004
+#define SCSW_STCTL_INTERMEDIATE 0x0008
+#define SCSW_STCTL_ALERT 0x0010
+
+#define SCSW_DSTAT_ATTENTION     0x80
+#define SCSW_DSTAT_STAT_MOD      0x40
+#define SCSW_DSTAT_CU_END        0x20
+#define SCSW_DSTAT_BUSY          0x10
+#define SCSW_DSTAT_CHANNEL_END   0x08
+#define SCSW_DSTAT_DEVICE_END    0x04
+#define SCSW_DSTAT_UNIT_CHECK    0x02
+#define SCSW_DSTAT_UNIT_EXCEP    0x01
+
+#define SCSW_CSTAT_PCI           0x80
+#define SCSW_CSTAT_INCORR_LEN    0x40
+#define SCSW_CSTAT_PROG_CHECK    0x20
+#define SCSW_CSTAT_PROT_CHECK    0x10
+#define SCSW_CSTAT_DATA_CHECK    0x08
+#define SCSW_CSTAT_CHN_CTRL_CHK  0x04
+#define SCSW_CSTAT_INTF_CTRL_CHK 0x02
+#define SCSW_CSTAT_CHAIN_CHECK   0x01
+
+/* path management control word */
+typedef struct PMCW {
+    uint32_t intparm;
+    uint16_t flags;
+    uint16_t devno;
+    uint8_t  lpm;
+    uint8_t  pnom;
+    uint8_t  lpum;
+    uint8_t  pim;
+    uint16_t mbi;
+    uint8_t  pom;
+    uint8_t  pam;
+    uint8_t  chpid[8];
+    uint32_t chars;
+} QEMU_PACKED PMCW;
+
+#define PMCW_FLAGS_MASK_QF 0x8000
+#define PMCW_FLAGS_MASK_W 0x4000
+#define PMCW_FLAGS_MASK_ISC 0x3800
+#define PMCW_FLAGS_MASK_ENA 0x0080
+#define PMCW_FLAGS_MASK_LM 0x0060
+#define PMCW_FLAGS_MASK_MME 0x0018
+#define PMCW_FLAGS_MASK_MP 0x0004
+#define PMCW_FLAGS_MASK_TF 0x0002
+#define PMCW_FLAGS_MASK_DNV 0x0001
+#define PMCW_FLAGS_MASK_INVALID 0x0700
+
+#define PMCW_CHARS_MASK_ST 0x00e00000
+#define PMCW_CHARS_MASK_MBFC 0x00000004
+#define PMCW_CHARS_MASK_XMWME 0x00000002
+#define PMCW_CHARS_MASK_CSENSE 0x00000001
+#define PMCW_CHARS_MASK_INVALID 0xff1ffff8
+
+/* subchannel information block */
+typedef struct SCHIB {
+    PMCW pmcw;
+    SCSW scsw;
+    uint64_t mba;
+    uint8_t mda[4];
+} QEMU_PACKED SCHIB;
+
+/* interruption response block */
+typedef struct IRB {
+    SCSW scsw;
+    uint32_t esw[5];
+    uint32_t ecw[8];
+    uint32_t emw[8];
+} QEMU_PACKED IRB;
+
+/* operation request block */
+typedef struct ORB {
+    uint32_t intparm;
+    uint16_t ctrl0;
+    uint8_t lpm;
+    uint8_t ctrl1;
+    uint32_t cpa;
+} QEMU_PACKED ORB;
+
+#define ORB_CTRL0_MASK_KEY 0xf000
+#define ORB_CTRL0_MASK_SPND 0x0800
+#define ORB_CTRL0_MASK_STR 0x0400
+#define ORB_CTRL0_MASK_MOD 0x0200
+#define ORB_CTRL0_MASK_SYNC 0x0100
+#define ORB_CTRL0_MASK_FMT 0x0080
+#define ORB_CTRL0_MASK_PFCH 0x0040
+#define ORB_CTRL0_MASK_ISIC 0x0020
+#define ORB_CTRL0_MASK_ALCC 0x0010
+#define ORB_CTRL0_MASK_SSIC 0x0008
+#define ORB_CTRL0_MASK_C64 0x0002
+#define ORB_CTRL0_MASK_I2K 0x0001
+#define ORB_CTRL0_MASK_INVALID 0x0004
+
+#define ORB_CTRL1_MASK_ILS 0x80
+#define ORB_CTRL1_MASK_MIDAW 0x40
+#define ORB_CTRL1_MASK_ORBX 0x01
+#define ORB_CTRL1_MASK_INVALID 0x3e
+
+/* channel command word (type 0) */
+typedef struct CCW0 {
+        uint8_t cmd_code;
+        uint8_t cda0;
+        uint16_t cda1;
+        uint8_t flags;
+        uint8_t reserved;
+        uint16_t count;
+} QEMU_PACKED CCW0;
+
+/* channel command word (type 1) */
+typedef struct CCW1 {
+    uint8_t cmd_code;
+    uint8_t flags;
+    uint16_t count;
+    uint32_t cda;
+} QEMU_PACKED CCW1;
+
+#define CCW_FLAG_DC              0x80
+#define CCW_FLAG_CC              0x40
+#define CCW_FLAG_SLI             0x20
+#define CCW_FLAG_SKIP            0x10
+#define CCW_FLAG_PCI             0x08
+#define CCW_FLAG_IDA             0x04
+#define CCW_FLAG_SUSPEND         0x02
+
+#define CCW_CMD_NOOP             0x03
+#define CCW_CMD_BASIC_SENSE      0x04
+#define CCW_CMD_TIC              0x08
+#define CCW_CMD_SENSE_ID         0xe4
+
+typedef struct CRW {
+    uint16_t flags;
+    uint16_t rsid;
+} QEMU_PACKED CRW;
+
+#define CRW_FLAGS_MASK_S 0x4000
+#define CRW_FLAGS_MASK_R 0x2000
+#define CRW_FLAGS_MASK_C 0x1000
+#define CRW_FLAGS_MASK_RSC 0x0f00
+#define CRW_FLAGS_MASK_A 0x0080
+#define CRW_FLAGS_MASK_ERC 0x003f
+
+#define CRW_ERC_INIT 0x02
+#define CRW_ERC_IPI  0x04
+
+#define CRW_RSC_SUBCH 0x3
+#define CRW_RSC_CHP   0x4
+#define CRW_RSC_CSS   0xb
+
+/* I/O interruption code */
+typedef struct IOIntCode {
+    uint32_t subsys_id;
+    uint32_t intparm;
+    uint32_t interrupt_id;
+} QEMU_PACKED IOIntCode;
+
+/* schid disintegration */
+#define IOINST_SCHID_ONE(_schid)   ((_schid & 0x00010000) >> 16)
+#define IOINST_SCHID_M(_schid)     ((_schid & 0x00080000) >> 19)
+#define IOINST_SCHID_CSSID(_schid) ((_schid & 0xff000000) >> 24)
+#define IOINST_SCHID_SSID(_schid)  ((_schid & 0x00060000) >> 17)
+#define IOINST_SCHID_NR(_schid)    (_schid & 0x0000ffff)
+
+#define IO_INT_WORD_ISC(_int_word) ((_int_word & 0x38000000) >> 27)
+#define ISC_TO_ISC_BITS(_isc)      ((0x80 >> _isc) << 24)
+
+#define IO_INT_WORD_AI 0x80000000
+
+int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
+                                 int *schid);
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb);
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb);
+int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb);
+void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
+                        uint32_t ipb);
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1);
+
+#endif
diff --git a/qemu/target-s390x/kvm.c b/qemu/target-s390x/kvm.c
new file mode 100644
index 000000000..ae3a0affe
--- /dev/null
+++ b/qemu/target-s390x/kvm.c
@@ -0,0 +1,2245 @@
+/*
+ * QEMU S390x KVM implementation
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ * Copyright IBM Corp. 2012
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contributions after 2012-10-29 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU (Lesser) General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/kvm.h>
+#include <asm/ptrace.h>
+
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "hw/hw.h"
+#include "cpu.h"
+#include "sysemu/device_tree.h"
+#include "qapi/qmp/qjson.h"
+#include "exec/gdbstub.h"
+#include "exec/address-spaces.h"
+#include "trace.h"
+#include "qapi-event.h"
+#include "hw/s390x/s390-pci-inst.h"
+#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/ipl.h"
+#include "hw/s390x/ebcdic.h"
+#include "exec/memattrs.h"
+
+/* #define DEBUG_KVM */
+
+#ifdef DEBUG_KVM
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+#define kvm_vm_check_mem_attr(s, attr) \
+    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
+
+#define IPA0_DIAG                       0x8300
+#define IPA0_SIGP                       0xae00
+#define IPA0_B2                         0xb200
+#define IPA0_B9                         0xb900
+#define IPA0_EB                         0xeb00
+#define IPA0_E3                         0xe300
+
+#define PRIV_B2_SCLP_CALL               0x20
+#define PRIV_B2_CSCH                    0x30
+#define PRIV_B2_HSCH                    0x31
+#define PRIV_B2_MSCH                    0x32
+#define PRIV_B2_SSCH                    0x33
+#define PRIV_B2_STSCH                   0x34
+#define PRIV_B2_TSCH                    0x35
+#define PRIV_B2_TPI                     0x36
+#define PRIV_B2_SAL                     0x37
+#define PRIV_B2_RSCH                    0x38
+#define PRIV_B2_STCRW                   0x39
+#define PRIV_B2_STCPS                   0x3a
+#define PRIV_B2_RCHP                    0x3b
+#define PRIV_B2_SCHM                    0x3c
+#define PRIV_B2_CHSC                    0x5f
+#define PRIV_B2_SIGA                    0x74
+#define PRIV_B2_XSCH                    0x76
+
+#define PRIV_EB_SQBS                    0x8a
+#define PRIV_EB_PCISTB                  0xd0
+#define PRIV_EB_SIC                     0xd1
+
+#define PRIV_B9_EQBS                    0x9c
+#define PRIV_B9_CLP                     0xa0
+#define PRIV_B9_PCISTG                  0xd0
+#define PRIV_B9_PCILG                   0xd2
+#define PRIV_B9_RPCIT                   0xd3
+
+#define PRIV_E3_MPCIFC                  0xd0
+#define PRIV_E3_STPCIFC                 0xd4
+
+#define DIAG_TIMEREVENT                 0x288
+#define DIAG_IPL                        0x308
+#define DIAG_KVM_HYPERCALL              0x500
+#define DIAG_KVM_BREAKPOINT             0x501
+
+#define ICPT_INSTRUCTION                0x04
+#define ICPT_PROGRAM                    0x08
+#define ICPT_EXT_INT                    0x14
+#define ICPT_WAITPSW                    0x1c
+#define ICPT_SOFT_INTERCEPT             0x24
+#define ICPT_CPU_STOP                   0x28
+#define ICPT_IO                         0x40
+
+#define NR_LOCAL_IRQS 32
+/*
+ * Needs to be big enough to contain max_cpus emergency signals
+ * and in addition NR_LOCAL_IRQS interrupts
+ */
+#define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \
+                           (max_cpus + NR_LOCAL_IRQS))
+
+static CPUWatchpoint hw_watchpoint;
+/*
+ * We don't use a list because this structure is also used to transmit the
+ * hardware breakpoints to the kernel.
+ */
+static struct kvm_hw_breakpoint *hw_breakpoints;
+static int nb_hw_breakpoints;
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+    KVM_CAP_LAST_INFO
+};
+
+static int cap_sync_regs;
+static int cap_async_pf;
+static int cap_mem_op;
+static int cap_s390_irq;
+
+static void *legacy_s390_alloc(size_t size, uint64_t *align);
+
+static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
+{
+    struct kvm_device_attr attr = {
+        .group = KVM_S390_VM_MEM_CTRL,
+        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+        .addr = (uint64_t) memory_limit,
+    };
+
+    return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
+}
+
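+/*
+ * Set the guest memory limit if the kernel supports it.  The hardware
+ * limit is queried first; -E2BIG is returned if the requested limit
+ * exceeds what the host can provide.
+ */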
+int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
+{
+    int rc;
+
+    struct kvm_device_attr attr = {
+        .group = KVM_S390_VM_MEM_CTRL,
+        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+        .addr = (uint64_t) &new_limit,
+    };
+
+    if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_LIMIT_SIZE)) {
+        return 0;
+    }
+
+    rc = kvm_s390_query_mem_limit(s, hw_limit);
+    if (rc) {
+        return rc;
+    } else if (*hw_limit < new_limit) {
+        return -E2BIG;
+    }
+
+    return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+void kvm_s390_clear_cmma_callback(void *opaque)
+{
+    int rc;
+    KVMState *s = opaque;
+    struct kvm_device_attr attr = {
+        .group = KVM_S390_VM_MEM_CTRL,
+        .attr = KVM_S390_VM_MEM_CLR_CMMA,
+    };
+
+    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
+    trace_kvm_clear_cmma(rc);
+}
+
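+/*
+ * Enable the collaborative memory management assist (CMMA) if the
+ * kernel supports both enabling and clearing it; on success, register
+ * a reset handler so the page usage states are cleared on reset.
+ */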
+static void kvm_s390_enable_cmma(KVMState *s)
+{
+    int rc;
+    struct kvm_device_attr attr = {
+        .group = KVM_S390_VM_MEM_CTRL,
+        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
+    };
+
+    if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_ENABLE_CMMA) ||
+        !kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_CLR_CMMA)) {
+        return;
+    }
+
+    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
+    if (!rc) {
+        qemu_register_reset(kvm_s390_clear_cmma_callback, s);
+    }
+    trace_kvm_enable_cmma(rc);
+}
+
+static void kvm_s390_set_attr(uint64_t attr)
+{
+    struct kvm_device_attr attribute = {
+        .group = KVM_S390_VM_CRYPTO,
+        .attr  = attr,
+    };
+
+    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
+
+    if (ret) {
+        error_report("Failed to set crypto device attribute %lu: %s",
+                     attr, strerror(-ret));
+    }
+}
+
+static void kvm_s390_init_aes_kw(void)
+{
+    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;
+
+    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
+                                 NULL)) {
+            attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
+    }
+
+    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+            kvm_s390_set_attr(attr);
+    }
+}
+
+static void kvm_s390_init_dea_kw(void)
+{
+    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;
+
+    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
+                                 NULL)) {
+            attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
+    }
+
+    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+            kvm_s390_set_attr(attr);
+    }
+}
+
+static void kvm_s390_init_crypto(void)
+{
+    kvm_s390_init_aes_kw();
+    kvm_s390_init_dea_kw();
+}
+
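+/*
+ * Probe the optional capabilities, enable CMMA, fall back to the legacy
+ * allocator on kernels without GMAP/COW support, and opt in to handling
+ * SIGP and STSI in user space as well as to vector registers.
+ */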
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
+    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
+    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
+    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
+
+    kvm_s390_enable_cmma(s);
+
+    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
+        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
+        phys_mem_set_alloc(legacy_s390_alloc);
+    }
+
+    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
+    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
+    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
+
+    return 0;
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+    return cpu->cpu_index;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
+    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE);
+    return 0;
+}
+
+void kvm_s390_reset_vcpu(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+
+    /* The initial reset call is needed here to reset in-kernel
+     * vcpu data that we can't access directly from QEMU
+     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
+     * Before this ioctl, cpu_synchronize_state() is called in common KVM
+     * code (kvm-all). */
+    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
+        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
+    }
+
+    kvm_s390_init_crypto();
+}
+
+static int can_sync_regs(CPUState *cs, int regs)
+{
+    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
+}
+
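+/*
+ * Copy QEMU's CPU state into the kernel.  Register blocks exposed via
+ * the sync_regs run structure are used (and marked dirty) where
+ * available; otherwise we fall back to the dedicated ioctls, or to
+ * ONE_REG for single registers.
+ */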
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    struct kvm_sregs sregs;
+    struct kvm_regs regs;
+    struct kvm_fpu fpu = {};
+    int r;
+    int i;
+
+    /* always save the PSW and the GPRS */
+    cs->kvm_run->psw_addr = env->psw.addr;
+    cs->kvm_run->psw_mask = env->psw.mask;
+
+    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
+        for (i = 0; i < 16; i++) {
+            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
+        }
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+    } else {
+        for (i = 0; i < 16; i++) {
+            regs.gprs[i] = env->regs[i];
+        }
+        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+        if (r < 0) {
+            return r;
+        }
+    }
+
+    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
+        for (i = 0; i < 32; i++) {
+            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0].ll;
+            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1].ll;
+        }
+        cs->kvm_run->s.regs.fpc = env->fpc;
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
+    } else {
+        /* Floating point */
+        for (i = 0; i < 16; i++) {
+            fpu.fprs[i] = get_freg(env, i)->ll;
+        }
+        fpu.fpc = env->fpc;
+
+        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
+        if (r < 0) {
+            return r;
+        }
+    }
+
+    /* Do we need to save more than that? */
+    if (level == KVM_PUT_RUNTIME_STATE) {
+        return 0;
+    }
+
+    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
+        cs->kvm_run->s.regs.cputm = env->cputm;
+        cs->kvm_run->s.regs.ckc = env->ckc;
+        cs->kvm_run->s.regs.todpr = env->todpr;
+        cs->kvm_run->s.regs.gbea = env->gbea;
+        cs->kvm_run->s.regs.pp = env->pp;
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
+    } else {
+        /*
+         * These ONE_REGS are not protected by a capability. As they are only
+         * necessary for migration we just trace a possible error, but don't
+         * return with an error return code.
+         */
+        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
+    }
+
+    /* pfault parameters */
+    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
+        cs->kvm_run->s.regs.pft = env->pfault_token;
+        cs->kvm_run->s.regs.pfs = env->pfault_select;
+        cs->kvm_run->s.regs.pfc = env->pfault_compare;
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
+    } else if (cap_async_pf) {
+        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+        if (r < 0) {
+            return r;
+        }
+        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+        if (r < 0) {
+            return r;
+        }
+        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+        if (r < 0) {
+            return r;
+        }
+    }
+
+    /* access registers and control registers */
+    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
+        for (i = 0; i < 16; i++) {
+            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
+            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
+        }
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
+    } else {
+        for (i = 0; i < 16; i++) {
+            sregs.acrs[i] = env->aregs[i];
+            sregs.crs[i] = env->cregs[i];
+        }
+        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+        if (r < 0) {
+            return r;
+        }
+    }
+
+    /* Finally the prefix */
+    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
+        cs->kvm_run->s.regs.prefix = env->psa;
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
+    } else {
+        /* prefix is only supported via sync regs */
+    }
+    return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    struct kvm_sregs sregs;
+    struct kvm_regs regs;
+    struct kvm_fpu fpu;
+    int i, r;
+
+    /* get the PSW */
+    env->psw.addr = cs->kvm_run->psw_addr;
+    env->psw.mask = cs->kvm_run->psw_mask;
+
+    /* the GPRS */
+    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
+        for (i = 0; i < 16; i++) {
+            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
+        }
+    } else {
+        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+        if (r < 0) {
+            return r;
+        }
+        for (i = 0; i < 16; i++) {
+            env->regs[i] = regs.gprs[i];
+        }
+    }
+
+    /* The ACRS and CRS */
+    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
+        for (i = 0; i < 16; i++) {
+            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
+            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
+        }
+    } else {
+        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+        if (r < 0) {
+            return r;
+        }
+        for (i = 0; i < 16; i++) {
+            env->aregs[i] = sregs.acrs[i];
+            env->cregs[i] = sregs.crs[i];
+        }
+    }
+
+    /* Floating point and vector registers */
+    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
+        for (i = 0; i < 32; i++) {
+            env->vregs[i][0].ll = cs->kvm_run->s.regs.vrs[i][0];
+            env->vregs[i][1].ll = cs->kvm_run->s.regs.vrs[i][1];
+        }
+        env->fpc = cs->kvm_run->s.regs.fpc;
+    } else {
+        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
+        if (r < 0) {
+            return r;
+        }
+        for (i = 0; i < 16; i++) {
+            get_freg(env, i)->ll = fpu.fprs[i];
+        }
+        env->fpc = fpu.fpc;
+    }
+
+    /* The prefix */
+    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
+        env->psa = cs->kvm_run->s.regs.prefix;
+    }
+
+    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
+        env->cputm = cs->kvm_run->s.regs.cputm;
+        env->ckc = cs->kvm_run->s.regs.ckc;
+        env->todpr = cs->kvm_run->s.regs.todpr;
+        env->gbea = cs->kvm_run->s.regs.gbea;
+        env->pp = cs->kvm_run->s.regs.pp;
+    } else {
+        /*
+         * These ONE_REGS are not protected by a capability. As they are only
+         * necessary for migration we just trace a possible error, but don't
+         * return with an error return code.
+         */
+        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
+    }
+
+    /* pfault parameters */
+    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
+        env->pfault_token = cs->kvm_run->s.regs.pft;
+        env->pfault_select = cs->kvm_run->s.regs.pfs;
+        env->pfault_compare = cs->kvm_run->s.regs.pfc;
+    } else if (cap_async_pf) {
+        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+        if (r < 0) {
+            return r;
+        }
+        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+        if (r < 0) {
+            return r;
+        }
+        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+        if (r < 0) {
+            return r;
+        }
+    }
+
+    return 0;
+}
+
+int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+    int r;
+    struct kvm_device_attr attr = {
+        .group = KVM_S390_VM_TOD,
+        .attr = KVM_S390_VM_TOD_LOW,
+        .addr = (uint64_t)tod_low,
+    };
+
+    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+    if (r) {
+        return r;
+    }
+
+    attr.attr = KVM_S390_VM_TOD_HIGH;
+    attr.addr = (uint64_t)tod_high;
+    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+}
+
+int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+    int r;
+
+    struct kvm_device_attr attr = {
+        .group = KVM_S390_VM_TOD,
+        .attr = KVM_S390_VM_TOD_LOW,
+        .addr = (uint64_t)tod_low,
+    };
+
+    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+    if (r) {
+        return r;
+    }
+
+    attr.attr = KVM_S390_VM_TOD_HIGH;
+    attr.addr = (uint64_t)tod_high;
+    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+/**
+ * kvm_s390_mem_op:
+ * @addr:      the logical start address in guest memory
+ * @ar:        the access register number
+ * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
+ * @len:       length that should be transferred
+ * @is_write:  true = write, false = read
+ * Returns:    0 on success, non-zero if an exception or error occurred
+ *
+ * Use KVM ioctl to read/write from/to guest memory. An access exception
+ * is injected into the vCPU in case of translation errors.
+ */
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+                    int len, bool is_write)
+{
+    struct kvm_s390_mem_op mem_op = {
+        .gaddr = addr,
+        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
+        .size = len,
+        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
+                       : KVM_S390_MEMOP_LOGICAL_READ,
+        .buf = (uint64_t)hostbuf,
+        .ar = ar,
+    };
+    int ret;
+
+    if (!cap_mem_op) {
+        return -ENOSYS;
+    }
+    if (!hostbuf) {
+        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
+    if (ret < 0) {
+        error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
+    }
+    return ret;
+}
+
+/*
+ * Legacy layout for s390:
+ * Older S390 KVM requires the topmost vma of the RAM to be
+ * smaller than a system-defined value, which is at least 256GB.
+ * Larger systems have larger values. We put the guest between
+ * the end of data segment (system break) and this value. We
+ * use 32GB as a base to have enough room for the system break
+ * to grow. We also have to use MAP parameters that avoid
+ * read-only mapping of guest pages.
+ */
+static void *legacy_s390_alloc(size_t size, uint64_t *align)
+{
+    void *mem;
+
+    mem = mmap((void *) 0x800000000ULL, size,
+               PROT_EXEC|PROT_READ|PROT_WRITE,
+               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+    return mem == MAP_FAILED ? NULL : mem;
+}
+
+/* DIAG 501 is used for sw breakpoints */
+static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+                            sizeof(diag_501), 0) ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
+                            sizeof(diag_501), 1)) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    uint8_t t[sizeof(diag_501)];
+
+    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
+        return -EINVAL;
+    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
+        return -EINVAL;
+    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+                                   sizeof(diag_501), 1)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
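+/* Look up a hardware breakpoint by address and type; a len of -1 acts
+ * as a wildcard (the debug exit handler does not know the length). */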
+static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
+                                                    int len, int type)
+{
+    int n;
+
+    for (n = 0; n < nb_hw_breakpoints; n++) {
+        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
+            (hw_breakpoints[n].len == len || len == -1)) {
+            return &hw_breakpoints[n];
+        }
+    }
+
+    return NULL;
+}
+
+static int insert_hw_breakpoint(target_ulong addr, int len, int type)
+{
+    int size;
+
+    if (find_hw_breakpoint(addr, len, type)) {
+        return -EEXIST;
+    }
+
+    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
+
+    if (!hw_breakpoints) {
+        nb_hw_breakpoints = 0;
+        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
+    } else {
+        hw_breakpoints =
+            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
+    }
+
+    if (!hw_breakpoints) {
+        nb_hw_breakpoints = 0;
+        return -ENOMEM;
+    }
+
+    hw_breakpoints[nb_hw_breakpoints].addr = addr;
+    hw_breakpoints[nb_hw_breakpoints].len = len;
+    hw_breakpoints[nb_hw_breakpoints].type = type;
+
+    nb_hw_breakpoints++;
+
+    return 0;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        type = KVM_HW_BP;
+        break;
+    case GDB_WATCHPOINT_WRITE:
+        if (len < 1) {
+            return -EINVAL;
+        }
+        type = KVM_HW_WP_WRITE;
+        break;
+    default:
+        return -ENOSYS;
+    }
+    return insert_hw_breakpoint(addr, len, type);
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    int size;
+    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);
+
+    if (bp == NULL) {
+        return -ENOENT;
+    }
+
+    nb_hw_breakpoints--;
+    if (nb_hw_breakpoints > 0) {
+        /*
+         * In order to trim the array, move the last element into the
+         * position being removed, if necessary.
+         */
+        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
+            *bp = hw_breakpoints[nb_hw_breakpoints];
+        }
+        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
+        hw_breakpoints =
+             (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
+    } else {
+        g_free(hw_breakpoints);
+        hw_breakpoints = NULL;
+    }
+
+    return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+    nb_hw_breakpoints = 0;
+    g_free(hw_breakpoints);
+    hw_breakpoints = NULL;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+    int i;
+
+    if (nb_hw_breakpoints > 0) {
+        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
+        dbg->arch.hw_bp = hw_breakpoints;
+
+        for (i = 0; i < nb_hw_breakpoints; ++i) {
+            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
+                                                       hw_breakpoints[i].addr);
+        }
+        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+    } else {
+        dbg->arch.nr_hw_bp = 0;
+        dbg->arch.hw_bp = NULL;
+    }
+}
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
+{
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+    return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+    return cs->halted;
+}
+
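+/*
+ * Translate a kvm_s390_irq into the legacy kvm_s390_interrupt layout
+ * used by the old KVM_S390_INTERRUPT ioctl.
+ */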
+static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
+                                     struct kvm_s390_interrupt *interrupt)
+{
+    int r = 0;
+
+    interrupt->type = irq->type;
+    switch (irq->type) {
+    case KVM_S390_INT_VIRTIO:
+        interrupt->parm = irq->u.ext.ext_params;
+        /* fall through */
+    case KVM_S390_INT_PFAULT_INIT:
+    case KVM_S390_INT_PFAULT_DONE:
+        interrupt->parm64 = irq->u.ext.ext_params2;
+        break;
+    case KVM_S390_PROGRAM_INT:
+        interrupt->parm = irq->u.pgm.code;
+        break;
+    case KVM_S390_SIGP_SET_PREFIX:
+        interrupt->parm = irq->u.prefix.address;
+        break;
+    case KVM_S390_INT_SERVICE:
+        interrupt->parm = irq->u.ext.ext_params;
+        break;
+    case KVM_S390_MCHK:
+        interrupt->parm = irq->u.mchk.cr14;
+        interrupt->parm64 = irq->u.mchk.mcic;
+        break;
+    case KVM_S390_INT_EXTERNAL_CALL:
+        interrupt->parm = irq->u.extcall.code;
+        break;
+    case KVM_S390_INT_EMERGENCY:
+        interrupt->parm = irq->u.emerg.code;
+        break;
+    case KVM_S390_SIGP_STOP:
+    case KVM_S390_RESTART:
+        break; /* These types have no parameters */
+    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+        interrupt->parm = irq->u.io.subchannel_id << 16;
+        interrupt->parm |= irq->u.io.subchannel_nr;
+        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
+        interrupt->parm64 |= irq->u.io.io_int_word;
+        break;
+    default:
+        r = -EINVAL;
+        break;
+    }
+    return r;
+}
+
+static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_interrupt kvmint = {};
+    int r;
+
+    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
+    if (r < 0) {
+        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
+        exit(1);
+    }
+
+    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
+    if (r < 0) {
+        fprintf(stderr, "KVM failed to inject interrupt\n");
+        exit(1);
+    }
+}
+
+void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
+{
+    CPUState *cs = CPU(cpu);
+    int r;
+
+    if (cap_s390_irq) {
+        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
+        if (!r) {
+            return;
+        }
+        error_report("KVM failed to inject interrupt %llx", irq->type);
+        exit(1);
+    }
+
+    inject_vcpu_irq_legacy(cs, irq);
+}
+
+static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
+{
+    struct kvm_s390_interrupt kvmint = {};
+    int r;
+
+    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
+    if (r < 0) {
+        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
+        exit(1);
+    }
+
+    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
+    if (r < 0) {
+        fprintf(stderr, "KVM failed to inject interrupt\n");
+        exit(1);
+    }
+}
+
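+/*
+ * Inject a floating interrupt, trying the FLIC device first and falling
+ * back to the legacy VM ioctl if that fails (permanently so once the
+ * kernel reports -ENOSYS).
+ */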
+void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
+{
+    static bool use_flic = true;
+    int r;
+
+    if (use_flic) {
+        r = kvm_s390_inject_flic(irq);
+        if (r == -ENOSYS) {
+            use_flic = false;
+        }
+        if (!r) {
+            return;
+        }
+    }
+    __kvm_s390_floating_interrupt(irq);
+}
+
+void kvm_s390_virtio_irq(int config_change, uint64_t token)
+{
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_INT_VIRTIO,
+        .u.ext.ext_params = config_change,
+        .u.ext.ext_params2 = token,
+    };
+
+    kvm_s390_floating_interrupt(&irq);
+}
+
+void kvm_s390_service_interrupt(uint32_t parm)
+{
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_INT_SERVICE,
+        .u.ext.ext_params = parm,
+    };
+
+    kvm_s390_floating_interrupt(&irq);
+}
+
+static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
+{
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_PROGRAM_INT,
+        .u.pgm.code = code,
+    };
+
+    kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
+{
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_PROGRAM_INT,
+        .u.pgm.code = code,
+        .u.pgm.trans_exc_code = te_code,
+        .u.pgm.exc_access_id = te_code & 3,
+    };
+
+    kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
+                                 uint16_t ipbh0)
+{
+    CPUS390XState *env = &cpu->env;
+    uint64_t sccb;
+    uint32_t code;
+    int r = 0;
+
+    cpu_synchronize_state(CPU(cpu));
+    sccb = env->regs[ipbh0 & 0xf];
+    code = env->regs[(ipbh0 & 0xf0) >> 4];
+
+    r = sclp_service_call(env, sccb, code);
+    if (r < 0) {
+        enter_pgmcheck(cpu, -r);
+    } else {
+        setcc(cpu, r);
+    }
+
+    return 0;
+}
+
+static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+    CPUS390XState *env = &cpu->env;
+    int rc = 0;
+    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
+
+    cpu_synchronize_state(CPU(cpu));
+
+    switch (ipa1) {
+    case PRIV_B2_XSCH:
+        ioinst_handle_xsch(cpu, env->regs[1]);
+        break;
+    case PRIV_B2_CSCH:
+        ioinst_handle_csch(cpu, env->regs[1]);
+        break;
+    case PRIV_B2_HSCH:
+        ioinst_handle_hsch(cpu, env->regs[1]);
+        break;
+    case PRIV_B2_MSCH:
+        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
+        break;
+    case PRIV_B2_SSCH:
+        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
+        break;
+    case PRIV_B2_STCRW:
+        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
+        break;
+    case PRIV_B2_STSCH:
+        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
+        break;
+    case PRIV_B2_TSCH:
+        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
+        fprintf(stderr, "Spurious tsch intercept\n");
+        break;
+    case PRIV_B2_CHSC:
+        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
+        break;
+    case PRIV_B2_TPI:
+        /* This should have been handled by kvm already. */
+        fprintf(stderr, "Spurious tpi intercept\n");
+        break;
+    case PRIV_B2_SCHM:
+        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
+                           run->s390_sieic.ipb);
+        break;
+    case PRIV_B2_RSCH:
+        ioinst_handle_rsch(cpu, env->regs[1]);
+        break;
+    case PRIV_B2_RCHP:
+        ioinst_handle_rchp(cpu, env->regs[1]);
+        break;
+    case PRIV_B2_STCPS:
+        /* We do not provide this instruction, it is suppressed. */
+        break;
+    case PRIV_B2_SAL:
+        ioinst_handle_sal(cpu, env->regs[1]);
+        break;
+    case PRIV_B2_SIGA:
+        /* Not provided, set CC = 3 for subchannel not operational */
+        setcc(cpu, 3);
+        break;
+    case PRIV_B2_SCLP_CALL:
+        rc = kvm_sclp_service_call(cpu, run, ipbh0);
+        break;
+    default:
+        rc = -1;
+        DPRINTF("KVM: unhandled PRIV: 0xb2%02x\n", ipa1);
+        break;
+    }
+
+    return rc;
+}
+
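+/*
+ * Compute the effective address of an RXY-format instruction from the
+ * intercept data: x2 comes from the IPA, base2 and the 20-bit signed
+ * displacement from the IPB.
+ */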
+static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
+                                  uint8_t *ar)
+{
+    CPUS390XState *env = &cpu->env;
+    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
+    uint32_t base2 = run->s390_sieic.ipb >> 28;
+    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
+                     ((run->s390_sieic.ipb & 0xff00) << 4);
+
+    if (disp2 & 0x80000) {
+        disp2 += 0xfff00000;
+    }
+    if (ar) {
+        *ar = base2;
+    }
+
+    return (base2 ? env->regs[base2] : 0) +
+           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
+}
+
+static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
+                                  uint8_t *ar)
+{
+    CPUS390XState *env = &cpu->env;
+    uint32_t base2 = run->s390_sieic.ipb >> 28;
+    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
+                     ((run->s390_sieic.ipb & 0xff00) << 4);
+
+    if (disp2 & 0x80000) {
+        disp2 += 0xfff00000;
+    }
+    if (ar) {
+        *ar = base2;
+    }
+
+    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
+}
+
+static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+    return clp_service_call(cpu, r2);
+}
+
+static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+    return pcilg_service_call(cpu, r1, r2);
+}
+
+static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+    return pcistg_service_call(cpu, r1, r2);
+}
+
+static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+    uint64_t fiba;
+    uint8_t ar;
+
+    cpu_synchronize_state(CPU(cpu));
+    fiba = get_base_disp_rxy(cpu, run, &ar);
+
+    return stpcifc_service_call(cpu, r1, fiba, ar);
+}
+
+static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    /* NOOP */
+    return 0;
+}
+
+static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+    return rpcit_service_call(cpu, r1, r2);
+}
+
+static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
+    uint64_t gaddr;
+    uint8_t ar;
+
+    cpu_synchronize_state(CPU(cpu));
+    gaddr = get_base_disp_rsy(cpu, run, &ar);
+
+    return pcistb_service_call(cpu, r1, r3, gaddr, ar);
+}
+
+static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+    uint64_t fiba;
+    uint8_t ar;
+
+    cpu_synchronize_state(CPU(cpu));
+    fiba = get_base_disp_rxy(cpu, run, &ar);
+
+    return mpcifc_service_call(cpu, r1, fiba, ar);
+}
+
+static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+    int r = 0;
+
+    switch (ipa1) {
+    case PRIV_B9_CLP:
+        r = kvm_clp_service_call(cpu, run);
+        break;
+    case PRIV_B9_PCISTG:
+        r = kvm_pcistg_service_call(cpu, run);
+        break;
+    case PRIV_B9_PCILG:
+        r = kvm_pcilg_service_call(cpu, run);
+        break;
+    case PRIV_B9_RPCIT:
+        r = kvm_rpcit_service_call(cpu, run);
+        break;
+    case PRIV_B9_EQBS:
+        /* just inject exception */
+        r = -1;
+        break;
+    default:
+        r = -1;
+        DPRINTF("KVM: unhandled PRIV: 0xb9%02x\n", ipa1);
+        break;
+    }
+
+    return r;
+}
+
+static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
+{
+    int r = 0;
+
+    switch (ipbl) {
+    case PRIV_EB_PCISTB:
+        r = kvm_pcistb_service_call(cpu, run);
+        break;
+    case PRIV_EB_SIC:
+        r = kvm_sic_service_call(cpu, run);
+        break;
+    case PRIV_EB_SQBS:
+        /* just inject exception */
+        r = -1;
+        break;
+    default:
+        r = -1;
+        DPRINTF("KVM: unhandled PRIV: 0xeb%02x\n", ipbl);
+        break;
+    }
+
+    return r;
+}
+
+static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
+{
+    int r = 0;
+
+    switch (ipbl) {
+    case PRIV_E3_MPCIFC:
+        r = kvm_mpcifc_service_call(cpu, run);
+        break;
+    case PRIV_E3_STPCIFC:
+        r = kvm_stpcifc_service_call(cpu, run);
+        break;
+    default:
+        r = -1;
+        DPRINTF("KVM: unhandled PRIV: 0xe3%02x\n", ipbl);
+        break;
+    }
+
+    return r;
+}
+
+static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
+{
+    CPUS390XState *env = &cpu->env;
+    int ret;
+
+    cpu_synchronize_state(CPU(cpu));
+    ret = s390_virtio_hypercall(env);
+    if (ret == -EINVAL) {
+        enter_pgmcheck(cpu, PGM_SPECIFICATION);
+        return 0;
+    }
+
+    return ret;
+}
+
+static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
+{
+    uint64_t r1, r3;
+    int rc;
+
+    cpu_synchronize_state(CPU(cpu));
+    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+    r3 = run->s390_sieic.ipa & 0x000f;
+    rc = handle_diag_288(&cpu->env, r1, r3);
+    if (rc) {
+        enter_pgmcheck(cpu, PGM_SPECIFICATION);
+    }
+}
+
+static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
+{
+    uint64_t r1, r3;
+
+    cpu_synchronize_state(CPU(cpu));
+    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+    r3 = run->s390_sieic.ipa & 0x000f;
+    handle_diag_308(&cpu->env, r1, r3);
+}
+
+static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
+{
+    CPUS390XState *env = &cpu->env;
+    unsigned long pc;
+
+    cpu_synchronize_state(CPU(cpu));
+
+    pc = env->psw.addr - 4;
+    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
+        env->psw.addr = pc;
+        return EXCP_DEBUG;
+    }
+
+    return -ENOENT;
+}
+
+#define DIAG_KVM_CODE_MASK 0x000000000000ffff
+
+static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
+{
+    int r = 0;
+    uint16_t func_code;
+
+    /*
+     * For any diagnose call we support, bits 48-63 of the resulting
+     * address specify the function code; the remainder is ignored.
+     */
+    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
+    switch (func_code) {
+    case DIAG_TIMEREVENT:
+        kvm_handle_diag_288(cpu, run);
+        break;
+    case DIAG_IPL:
+        kvm_handle_diag_308(cpu, run);
+        break;
+    case DIAG_KVM_HYPERCALL:
+        r = handle_hypercall(cpu, run);
+        break;
+    case DIAG_KVM_BREAKPOINT:
+        r = handle_sw_breakpoint(cpu, run);
+        break;
+    default:
+        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
+        enter_pgmcheck(cpu, PGM_SPECIFICATION);
+        break;
+    }
+
+    return r;
+}
+
+typedef struct SigpInfo {
+    S390CPU *cpu;
+    uint64_t param;
+    int cc;
+    uint64_t *status_reg;
+} SigpInfo;
+
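+/* Store a SIGP status code in the low word of the caller's status
+ * register and report "status stored" as the condition code. */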
+static void set_sigp_status(SigpInfo *si, uint64_t status)
+{
+    *si->status_reg &= 0xffffffff00000000ULL;
+    *si->status_reg |= status;
+    si->cc = SIGP_CC_STATUS_STORED;
+}
+
+static void sigp_start(void *arg)
+{
+    SigpInfo *si = arg;
+
+    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        return;
+    }
+
+    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop(void *arg)
+{
+    SigpInfo *si = arg;
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_SIGP_STOP,
+    };
+
+    if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
+        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        return;
+    }
+
+    /* disabled wait - sleeping in user space */
+    if (CPU(si->cpu)->halted) {
+        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
+    } else {
+        /* execute the stop function */
+        si->cpu->env.sigp_order = SIGP_STOP;
+        kvm_s390_vcpu_interrupt(si->cpu, &irq);
+    }
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+#define ADTL_SAVE_AREA_SIZE 1024
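+/* Store the additional status (currently the 32 vector registers,
+ * 512 bytes) into the 1 KiB save area at @addr. */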
+static int kvm_s390_store_adtl_status(S390CPU *cpu, hwaddr addr)
+{
+    void *mem;
+    hwaddr len = ADTL_SAVE_AREA_SIZE;
+
+    mem = cpu_physical_memory_map(addr, &len, 1);
+    if (!mem) {
+        return -EFAULT;
+    }
+    if (len != ADTL_SAVE_AREA_SIZE) {
+        cpu_physical_memory_unmap(mem, len, 1, 0);
+        return -EFAULT;
+    }
+
+    memcpy(mem, &cpu->env.vregs, 512);
+
+    cpu_physical_memory_unmap(mem, len, 1, len);
+
+    return 0;
+}
+
+#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
+#define SAVE_AREA_SIZE 512
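+/*
+ * Write the 512-byte save area at @addr: FPRs at offset 0, GPRs at 128,
+ * PSW at 256, prefix at 280, FPC at 284, TOD programmable register at
+ * 292, CPU timer at 296, clock comparator at 304, access registers at
+ * 320 and control registers at 384.
+ */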
+static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
+{
+    static const uint8_t ar_id = 1;
+    uint64_t ckc = cpu->env.ckc >> 8;
+    void *mem;
+    int i;
+    hwaddr len = SAVE_AREA_SIZE;
+
+    mem = cpu_physical_memory_map(addr, &len, 1);
+    if (!mem) {
+        return -EFAULT;
+    }
+    if (len != SAVE_AREA_SIZE) {
+        cpu_physical_memory_unmap(mem, len, 1, 0);
+        return -EFAULT;
+    }
+
+    if (store_arch) {
+        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
+    }
+    for (i = 0; i < 16; ++i) {
+        *((uint64 *)mem + i) = get_freg(&cpu->env, i)->ll;
+    }
+    memcpy(mem + 128, &cpu->env.regs, 128);
+    memcpy(mem + 256, &cpu->env.psw, 16);
+    memcpy(mem + 280, &cpu->env.psa, 4);
+    memcpy(mem + 284, &cpu->env.fpc, 4);
+    memcpy(mem + 292, &cpu->env.todpr, 4);
+    memcpy(mem + 296, &cpu->env.cputm, 8);
+    memcpy(mem + 304, &ckc, 8);
+    memcpy(mem + 320, &cpu->env.aregs, 64);
+    memcpy(mem + 384, &cpu->env.cregs, 128);
+
+    cpu_physical_memory_unmap(mem, len, 1, len);
+
+    return 0;
+}
+
+static void sigp_stop_and_store_status(void *arg)
+{
+    SigpInfo *si = arg;
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_SIGP_STOP,
+    };
+
+    /* disabled wait - sleeping in user space */
+    if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
+        CPU(si->cpu)->halted) {
+        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
+    }
+
+    switch (s390_cpu_get_state(si->cpu)) {
+    case CPU_STATE_OPERATING:
+        si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
+        kvm_s390_vcpu_interrupt(si->cpu, &irq);
+        /* store will be performed when handling the stop intercept */
+        break;
+    case CPU_STATE_STOPPED:
+        /* already stopped, just store the status */
+        cpu_synchronize_state(CPU(si->cpu));
+        kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
+        break;
+    }
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_store_status_at_address(void *arg)
+{
+    SigpInfo *si = arg;
+    uint32_t address = si->param & 0x7ffffe00u;
+
+    /* cpu has to be stopped */
+    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+        return;
+    }
+
+    cpu_synchronize_state(CPU(si->cpu));
+
+    if (kvm_s390_store_status(si->cpu, address, false)) {
+        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        return;
+    }
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_store_adtl_status(void *arg)
+{
+    SigpInfo *si = arg;
+
+    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_VECTOR_REGISTERS)) {
+        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+        return;
+    }
+
+    /* cpu has to be stopped */
+    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+        return;
+    }
+
+    /* parameter must be aligned to 1024-byte boundary */
+    if (si->param & 0x3ff) {
+        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        return;
+    }
+
+    cpu_synchronize_state(CPU(si->cpu));
+
+    if (kvm_s390_store_adtl_status(si->cpu, si->param)) {
+        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        return;
+    }
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_restart(void *arg)
+{
+    SigpInfo *si = arg;
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_RESTART,
+    };
+
+    switch (s390_cpu_get_state(si->cpu)) {
+    case CPU_STATE_STOPPED:
+        /* the restart irq has to be delivered prior to any other pending irq */
+        cpu_synchronize_state(CPU(si->cpu));
+        do_restart_interrupt(&si->cpu->env);
+        s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
+        break;
+    case CPU_STATE_OPERATING:
+        kvm_s390_vcpu_interrupt(si->cpu, &irq);
+        break;
+    }
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+int kvm_s390_cpu_restart(S390CPU *cpu)
+{
+    SigpInfo si = {
+        .cpu = cpu,
+    };
+
+    run_on_cpu(CPU(cpu), sigp_restart, &si);
+    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
+    return 0;
+}
+
+static void sigp_initial_cpu_reset(void *arg)
+{
+    SigpInfo *si = arg;
+    CPUState *cs = CPU(si->cpu);
+    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
+
+    cpu_synchronize_state(cs);
+    scc->initial_cpu_reset(cs);
+    cpu_synchronize_post_reset(cs);
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_cpu_reset(void *arg)
+{
+    SigpInfo *si = arg;
+    CPUState *cs = CPU(si->cpu);
+    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
+
+    cpu_synchronize_state(cs);
+    scc->cpu_reset(cs);
+    cpu_synchronize_post_reset(cs);
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_set_prefix(void *arg)
+{
+    SigpInfo *si = arg;
+    uint32_t addr = si->param & 0x7fffe000u;
+
+    cpu_synchronize_state(CPU(si->cpu));
+
+    if (!address_space_access_valid(&address_space_memory, addr,
+                                    sizeof(struct LowCore), false)) {
+        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+        return;
+    }
+
+    /* cpu has to be stopped */
+    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+        return;
+    }
+
+    si->cpu->env.psa = addr;
+    cpu_synchronize_post_init(CPU(si->cpu));
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
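+/*
+ * Dispatch a SIGP order that targets a single vCPU.  The order runs on
+ * the destination CPU's thread via run_on_cpu(); only the reset orders
+ * may preempt a still-pending order on that CPU.
+ */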
+static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
+                                  uint64_t param, uint64_t *status_reg)
+{
+    SigpInfo si = {
+        .cpu = dst_cpu,
+        .param = param,
+        .status_reg = status_reg,
+    };
+
+    /* cpu available? */
+    if (dst_cpu == NULL) {
+        return SIGP_CC_NOT_OPERATIONAL;
+    }
+
+    /* only resets can break pending orders */
+    if (dst_cpu->env.sigp_order != 0 &&
+        order != SIGP_CPU_RESET &&
+        order != SIGP_INITIAL_CPU_RESET) {
+        return SIGP_CC_BUSY;
+    }
+
+    switch (order) {
+    case SIGP_START:
+        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
+        break;
+    case SIGP_STOP:
+        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
+        break;
+    case SIGP_RESTART:
+        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
+        break;
+    case SIGP_STOP_STORE_STATUS:
+        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
+        break;
+    case SIGP_STORE_STATUS_ADDR:
+        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
+        break;
+    case SIGP_STORE_ADTL_STATUS:
+        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, &si);
+        break;
+    case SIGP_SET_PREFIX:
+        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
+        break;
+    case SIGP_INITIAL_CPU_RESET:
+        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
+        break;
+    case SIGP_CPU_RESET:
+        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
+        break;
+    default:
+        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
+        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
+    }
+
+    return si.cc;
+}
+
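+/*
+ * SIGP SET ARCHITECTURE: all CPUs but the caller must be stopped.
+ * ESA/S390 mode is not supported; the z/Architecture transitions only
+ * reset the pfault tokens of all CPUs.
+ */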
+static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
+                                 uint64_t *status_reg)
+{
+    CPUState *cur_cs;
+    S390CPU *cur_cpu;
+
+    /* due to the BQL, we are the only active cpu */
+    CPU_FOREACH(cur_cs) {
+        cur_cpu = S390_CPU(cur_cs);
+        if (cur_cpu->env.sigp_order != 0) {
+            return SIGP_CC_BUSY;
+        }
+        cpu_synchronize_state(cur_cs);
+        /* all but the current one have to be stopped */
+        if (cur_cpu != cpu &&
+            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
+            *status_reg &= 0xffffffff00000000ULL;
+            *status_reg |= SIGP_STAT_INCORRECT_STATE;
+            return SIGP_CC_STATUS_STORED;
+        }
+    }
+
+    switch (param & 0xff) {
+    case SIGP_MODE_ESA_S390:
+        /* not supported */
+        return SIGP_CC_NOT_OPERATIONAL;
+    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
+    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
+        CPU_FOREACH(cur_cs) {
+            cur_cpu = S390_CPU(cur_cs);
+            cur_cpu->env.pfault_token = -1UL;
+        }
+        break;
+    default:
+        *status_reg &= 0xffffffff00000000ULL;
+        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
+        return SIGP_CC_STATUS_STORED;
+    }
+
+    return SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+#define SIGP_ORDER_MASK 0x000000ff
+
+static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+    CPUS390XState *env = &cpu->env;
+    const uint8_t r1 = ipa1 >> 4;
+    const uint8_t r3 = ipa1 & 0x0f;
+    int ret;
+    uint8_t order;
+    uint64_t *status_reg;
+    uint64_t param;
+    S390CPU *dst_cpu = NULL;
+
+    cpu_synchronize_state(CPU(cpu));
+
+    /* get order code */
+    order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
+        & SIGP_ORDER_MASK;
+    status_reg = &env->regs[r1];
+    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
+
+    switch (order) {
+    case SIGP_SET_ARCH:
+        ret = sigp_set_architecture(cpu, param, status_reg);
+        break;
+    default:
+        /* all other sigp orders target a single vcpu */
+        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
+        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
+    }
+
+    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
+                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
+
+    if (ret >= 0) {
+        setcc(cpu, ret);
+        return 0;
+    }
+
+    return ret;
+}
+
+static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
+{
+    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
+    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
+    int r = -1;
+
+    DPRINTF("handle_instruction 0x%x 0x%x\n",
+            run->s390_sieic.ipa, run->s390_sieic.ipb);
+    switch (ipa0) {
+    case IPA0_B2:
+        r = handle_b2(cpu, run, ipa1);
+        break;
+    case IPA0_B9:
+        r = handle_b9(cpu, run, ipa1);
+        break;
+    case IPA0_EB:
+        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
+        break;
+    case IPA0_E3:
+        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
+        break;
+    case IPA0_DIAG:
+        r = handle_diag(cpu, run, run->s390_sieic.ipb);
+        break;
+    case IPA0_SIGP:
+        r = handle_sigp(cpu, run, ipa1);
+        break;
+    }
+
+    if (r < 0) {
+        r = 0;
+        enter_pgmcheck(cpu, 0x0001);
+    }
+
+    return r;
+}
+
+static bool is_special_wait_psw(CPUState *cs)
+{
+    /* signal quiesce */
+    return cs->kvm_run->psw_addr == 0xfffUL;
+}
+
+static void guest_panicked(void)
+{
+    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
+                                   &error_abort);
+    vm_stop(RUN_STATE_GUEST_PANICKED);
+}
+
+static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
+{
+    CPUState *cs = CPU(cpu);
+
+    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
+                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
+                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
+    s390_cpu_halt(cpu);
+    guest_panicked();
+}
+
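+/*
+ * Dispatch SIE intercepts: instructions are emulated above, a disabled
+ * wait PSW either requests a shutdown (special quiesce PSW) or panics
+ * the guest, and a CPU stop completes a pending SIGP stop order.
+ */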
+static int handle_intercept(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
+    int icpt_code = run->s390_sieic.icptcode;
+    int r = 0;
+
+    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
+            (long)cs->kvm_run->psw_addr);
+    switch (icpt_code) {
+        case ICPT_INSTRUCTION:
+            r = handle_instruction(cpu, run);
+            break;
+        case ICPT_PROGRAM:
+            unmanageable_intercept(cpu, "program interrupt",
+                                   offsetof(LowCore, program_new_psw));
+            r = EXCP_HALTED;
+            break;
+        case ICPT_EXT_INT:
+            unmanageable_intercept(cpu, "external interrupt",
+                                   offsetof(LowCore, external_new_psw));
+            r = EXCP_HALTED;
+            break;
+        case ICPT_WAITPSW:
+            /* disabled wait, since enabled wait is handled in kernel */
+            cpu_synchronize_state(cs);
+            if (s390_cpu_halt(cpu) == 0) {
+                if (is_special_wait_psw(cs)) {
+                    qemu_system_shutdown_request();
+                } else {
+                    guest_panicked();
+                }
+            }
+            r = EXCP_HALTED;
+            break;
+        case ICPT_CPU_STOP:
+            if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
+                qemu_system_shutdown_request();
+            }
+            if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
+                kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
+                                      true);
+            }
+            cpu->env.sigp_order = 0;
+            r = EXCP_HALTED;
+            break;
+        case ICPT_SOFT_INTERCEPT:
+            fprintf(stderr, "KVM unimplemented icpt SOFT\n");
+            exit(1);
+            break;
+        case ICPT_IO:
+            fprintf(stderr, "KVM unimplemented icpt IO\n");
+            exit(1);
+            break;
+        default:
+            fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
+            exit(1);
+            break;
+    }
+
+    return r;
+}
+
+static int handle_tsch(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
+    int ret;
+
+    cpu_synchronize_state(cs);
+
+    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
+    if (ret < 0) {
+        /*
+         * Failure.
+         * If an I/O interrupt had been dequeued, we have to reinject it.
+         */
+        if (run->s390_tsch.dequeued) {
+            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
+                                  run->s390_tsch.subchannel_nr,
+                                  run->s390_tsch.io_int_parm,
+                                  run->s390_tsch.io_int_word);
+        }
+        ret = 0;
+    }
+    return ret;
+}
+
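+/*
+ * Patch the level-3 (VM) SYSIB for STSI 3.2.2: push our own entry onto
+ * the stack of Extended Names and fill in the configured guest name
+ * (EBCDIC short name, UTF-8 extended name) and the UUID.
+ */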
+static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
+{
+    struct sysib_322 sysib;
+    int del;
+
+    if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
+        return;
+    }
+    /* Shift the stack of Extended Names to prepare for our own data */
+    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
+            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
+    /* The first virtualization level that doesn't provide Extended Names
+     * delimits the stack; it is assumed to be incapable of managing
+     * Extended Names for lower levels.
+     */
+    for (del = 1; del < sysib.count; del++) {
+        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
+            break;
+        }
+    }
+    if (del < sysib.count) {
+        memset(sysib.ext_names[del], 0,
+               sizeof(sysib.ext_names[0]) * (sysib.count - del));
+    }
+    /* Insert short machine name in EBCDIC, padded with blanks */
+    if (qemu_name) {
+        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
+        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
+                                                    strlen(qemu_name)));
+    }
+    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
+    memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
+    /* If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
+     * SYSIB, s390 considers it incapable of providing any Extended Name.
+     * Therefore, if no name was specified on the qemu invocation, we go
+     * with the same "KVMguest" default that KVM has filled into the short
+     * name field.
+     */
+    if (qemu_name) {
+        strncpy((char *)sysib.ext_names[0], qemu_name,
+                sizeof(sysib.ext_names[0]));
+    } else {
+        strcpy((char *)sysib.ext_names[0], "KVMguest");
+    }
+    /* Insert UUID */
+    memcpy(sysib.vm[0].uuid, qemu_uuid, sizeof(sysib.vm[0].uuid));
+
+    s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
+}
+
+static int handle_stsi(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
+
+    switch (run->s390_stsi.fc) {
+    case 3:
+        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
+            return 0;
+        }
+        /* Only sysib 3.2.2 needs post-handling for now. */
+        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
+        return 0;
+    default:
+        return 0;
+    }
+}
+
+static int kvm_arch_handle_debug_exit(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_run *run = cs->kvm_run;
+
+    int ret = 0;
+    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+
+    switch (arch_info->type) {
+    case KVM_HW_WP_WRITE:
+        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
+            cs->watchpoint_hit = &hw_watchpoint;
+            hw_watchpoint.vaddr = arch_info->addr;
+            hw_watchpoint.flags = BP_MEM_WRITE;
+            ret = EXCP_DEBUG;
+        }
+        break;
+    case KVM_HW_BP:
+        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
+            ret = EXCP_DEBUG;
+        }
+        break;
+    case KVM_SINGLESTEP:
+        if (cs->singlestep_enabled) {
+            ret = EXCP_DEBUG;
+        }
+        break;
+    default:
+        ret = -ENOSYS;
+    }
+
+    return ret;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    int ret = 0;
+
+    qemu_mutex_lock_iothread();
+
+    switch (run->exit_reason) {
+        case KVM_EXIT_S390_SIEIC:
+            ret = handle_intercept(cpu);
+            break;
+        case KVM_EXIT_S390_RESET:
+            s390_reipl_request();
+            break;
+        case KVM_EXIT_S390_TSCH:
+            ret = handle_tsch(cpu);
+            break;
+        case KVM_EXIT_S390_STSI:
+            ret = handle_stsi(cpu);
+            break;
+        case KVM_EXIT_DEBUG:
+            ret = kvm_arch_handle_debug_exit(cpu);
+            break;
+        default:
+            fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
+            break;
+    }
+    qemu_mutex_unlock_iothread();
+
+    if (ret == 0) {
+        ret = EXCP_INTERRUPT;
+    }
+    return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
+{
+    return true;
+}
+
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
+{
+    return 1;
+}
+
+int kvm_arch_on_sigbus(int code, void *addr)
+{
+    return 1;
+}
+
+void kvm_s390_io_interrupt(uint16_t subchannel_id,
+                           uint16_t subchannel_nr, uint32_t io_int_parm,
+                           uint32_t io_int_word)
+{
+    struct kvm_s390_irq irq = {
+        .u.io.subchannel_id = subchannel_id,
+        .u.io.subchannel_nr = subchannel_nr,
+        .u.io.io_int_parm = io_int_parm,
+        .u.io.io_int_word = io_int_word,
+    };
+
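+    /* Adapter (AI) interrupts use the generic adapter interrupt type;
+       classic I/O interrupts instead encode the originating subchannel
+       in the interrupt type. */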
+    if (io_int_word & IO_INT_WORD_AI) {
+        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
+    } else {
+        irq.type = ((subchannel_id & 0xff00) << 24) |
+            ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
+    }
+    kvm_s390_floating_interrupt(&irq);
+}
+
+void kvm_s390_crw_mchk(void)
+{
+    struct kvm_s390_irq irq = {
+        .type = KVM_S390_MCHK,
+        .u.mchk.cr14 = 1 << 28,
+        .u.mchk.mcic = 0x00400f1d40330000ULL,
+    };
+    kvm_s390_floating_interrupt(&irq);
+}
+
+void kvm_s390_enable_css_support(S390CPU *cpu)
+{
+    int r;
+
+    /* Activate host kernel channel subsystem support. */
+    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
+    assert(r == 0);
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+    /*
+     * Note that while irqchip capabilities generally imply that cpustates
+     * are handled in-kernel, it is not true for s390 (yet); therefore, we
+     * have to override the common code kvm_halt_in_kernel_allowed setting.
+     */
+    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
+        kvm_gsi_routing_allowed = true;
+        kvm_halt_in_kernel_allowed = false;
+    }
+}
+
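+/* Wire up a virtqueue notifier for a subchannel: with DATAMATCH set, the
+   ioeventfd only fires when the guest notifies exactly this virtqueue (vq)
+   on this subchannel (sch). */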
+int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
+                                    int vq, bool assign)
+{
+    struct kvm_ioeventfd kick = {
+        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
+        KVM_IOEVENTFD_FLAG_DATAMATCH,
+        .fd = event_notifier_get_fd(notifier),
+        .datamatch = vq,
+        .addr = sch,
+        .len = 8,
+    };
+    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
+        return -ENOSYS;
+    }
+    if (!assign) {
+        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
+    }
+    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+}
+
+int kvm_s390_get_memslot_count(KVMState *s)
+{
+    return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
+}
+
+int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
+{
+    struct kvm_mp_state mp_state = {};
+    int ret;
+
+    /* the kvm part might not have been initialized yet */
+    if (CPU(cpu)->kvm_state == NULL) {
+        return 0;
+    }
+
+    switch (cpu_state) {
+    case CPU_STATE_STOPPED:
+        mp_state.mp_state = KVM_MP_STATE_STOPPED;
+        break;
+    case CPU_STATE_CHECK_STOP:
+        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
+        break;
+    case CPU_STATE_OPERATING:
+        mp_state.mp_state = KVM_MP_STATE_OPERATING;
+        break;
+    case CPU_STATE_LOAD:
+        mp_state.mp_state = KVM_MP_STATE_LOAD;
+        break;
+    default:
+        error_report("Requested CPU state is not a valid S390 CPU state: %u",
+                     cpu_state);
+        exit(1);
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+    if (ret) {
+        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
+                                       strerror(-ret));
+    }
+
+    return ret;
+}
+
+void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
+{
+    struct kvm_s390_irq_state irq_state;
+    CPUState *cs = CPU(cpu);
+    int32_t bytes;
+
+    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
+        return;
+    }
+
+    irq_state.buf = (uint64_t) cpu->irqstate;
+    irq_state.len = VCPU_IRQ_BUF_SIZE;
+
+    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
+    if (bytes < 0) {
+        cpu->irqstate_saved_size = 0;
+        error_report("Migration of interrupt state failed");
+        return;
+    }
+
+    cpu->irqstate_saved_size = bytes;
+}
+
+int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_s390_irq_state irq_state;
+    int r;
+
+    if (cpu->irqstate_saved_size == 0) {
+        return 0;
+    }
+
+    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
+        return -ENOSYS;
+    }
+
+    irq_state.buf = (uint64_t) cpu->irqstate;
+    irq_state.len = cpu->irqstate_saved_size;
+
+    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
+    if (r) {
+        error_report("Setting interrupt state failed %d", r);
+    }
+    return r;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+                             uint64_t address, uint32_t data)
+{
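+    /* The MSI data word packs the zPCI function id above the low
+       ZPCI_MSI_VEC_BITS vector bits; split it back apart.  */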
+    S390PCIBusDevice *pbdev;
+    uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
+    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
+
+    pbdev = s390_pci_find_dev_by_fid(fid);
+    if (!pbdev) {
+        DPRINTF("add_msi_route no dev\n");
+        return -ENODEV;
+    }
+
+    pbdev->routes.adapter.ind_offset = vec;
+
+    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
+    route->flags = 0;
+    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
+    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
+    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
+    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
+    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
+    return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+    abort();
+}
diff --git a/qemu/target-s390x/machine.c b/qemu/target-s390x/machine.c
new file mode 100644
index 000000000..b76fb0831
--- /dev/null
+++ b/qemu/target-s390x/machine.c
@@ -0,0 +1,170 @@
+/*
+ * S390x machine definitions and functions
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * Authors:
+ *   Thomas Huth <thuth@linux.vnet.ibm.com>
+ *   Christian Borntraeger <borntraeger@de.ibm.com>
+ *   Jason J. Herne <jjherne@us.ibm.com>
+ *
+ * This work is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "hw/hw.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+    S390CPU *cpu = opaque;
+
+    /*
+     * As the cpu state is pushed to kvm via kvm_set_mp_state rather
+     * than via cpu_synchronize_state, we need to update KVM here.
+     */
+    if (kvm_enabled()) {
+        kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
+        return kvm_s390_vcpu_interrupt_post_load(cpu);
+    }
+
+    return 0;
+}
+
+static void cpu_pre_save(void *opaque)
+{
+    S390CPU *cpu = opaque;
+
+    if (kvm_enabled()) {
+        kvm_s390_vcpu_interrupt_pre_save(cpu);
+    }
+}
+
+static inline bool fpu_needed(void *opaque)
+{
+    /* This looks odd, but we might want to NOT transfer fprs in the future */
+    return true;
+}
+
+const VMStateDescription vmstate_fpu = {
+    .name = "cpu/fpu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = fpu_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.vregs[0][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[1][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[2][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[3][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[4][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[5][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[6][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[7][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[8][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[9][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[10][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[11][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[12][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[13][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[14][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[15][0].ll, S390CPU),
+        VMSTATE_UINT32(env.fpc, S390CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
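+/* Gate the vector-register subsection on actual host support: a minimal
+ * sketch for the .needed callback referenced by vmstate_vregs below,
+ * assuming the KVM vector-register capability is the right check.
+ */
+static bool vregs_needed(void *opaque)
+{
+#ifdef CONFIG_KVM
+    if (kvm_enabled()) {
+        return kvm_check_extension(kvm_state, KVM_CAP_S390_VECTOR_REGISTERS);
+    }
+#endif
+    return false;
+}
+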
+const VMStateDescription vmstate_vregs = {
+    .name = "cpu/vregs",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = vregs_needed,
+    .fields = (VMStateField[]) {
+        /* vregs[0][0] -> vregs[15][0] and fregs are overlays */
+        VMSTATE_UINT64(env.vregs[16][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[17][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[18][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[19][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[20][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[21][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[22][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[23][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[24][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[25][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[26][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[27][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[28][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[29][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[30][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[31][0].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[0][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[1][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[2][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[3][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[4][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[5][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[6][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[7][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[8][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[9][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[10][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[11][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[12][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[13][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[14][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[15][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[16][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[17][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[18][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[19][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[20][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[21][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[22][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[23][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[24][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[25][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[26][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[27][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[28][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[29][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[30][1].ll, S390CPU),
+        VMSTATE_UINT64(env.vregs[31][1].ll, S390CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+const VMStateDescription vmstate_s390_cpu = {
+    .name = "cpu",
+    .post_load = cpu_post_load,
+    .pre_save = cpu_pre_save,
+    .version_id = 4,
+    .minimum_version_id = 3,
+    .fields      = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(env.regs, S390CPU, 16),
+        VMSTATE_UINT64(env.psw.mask, S390CPU),
+        VMSTATE_UINT64(env.psw.addr, S390CPU),
+        VMSTATE_UINT64(env.psa, S390CPU),
+        VMSTATE_UINT32(env.todpr, S390CPU),
+        VMSTATE_UINT64(env.pfault_token, S390CPU),
+        VMSTATE_UINT64(env.pfault_compare, S390CPU),
+        VMSTATE_UINT64(env.pfault_select, S390CPU),
+        VMSTATE_UINT64(env.cputm, S390CPU),
+        VMSTATE_UINT64(env.ckc, S390CPU),
+        VMSTATE_UINT64(env.gbea, S390CPU),
+        VMSTATE_UINT64(env.pp, S390CPU),
+        VMSTATE_UINT32_ARRAY(env.aregs, S390CPU, 16),
+        VMSTATE_UINT64_ARRAY(env.cregs, S390CPU, 16),
+        VMSTATE_UINT8(env.cpu_state, S390CPU),
+        VMSTATE_UINT8(env.sigp_order, S390CPU),
+        VMSTATE_UINT32_V(irqstate_saved_size, S390CPU, 4),
+        VMSTATE_VBUFFER_UINT32(irqstate, S390CPU, 4, NULL, 0,
+                               irqstate_saved_size),
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription*[]) {
+        &vmstate_fpu,
+        &vmstate_vregs,
+        NULL
+    },
+};
diff --git a/qemu/target-s390x/mem_helper.c b/qemu/target-s390x/mem_helper.c
new file mode 100644
index 000000000..6f8bd796a
--- /dev/null
+++ b/qemu/target-s390x/mem_helper.c
@@ -0,0 +1,1163 @@
+/*
+ *  S/390 memory access helper routines
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *  Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+
+/*****************************************************************************/
+/* Softmmu support */
+#if !defined(CONFIG_USER_ONLY)
+
+/* Try to fill the TLB and raise an exception on failure. If retaddr is
+   NULL, the function was called from C code (i.e. not from generated code
+   or from helper.c). */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
+              uintptr_t retaddr)
+{
+    int ret;
+
+    ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
+    if (unlikely(ret != 0)) {
+        if (likely(retaddr)) {
+            /* now we have a real cpu fault */
+            cpu_restore_state(cs, retaddr);
+        }
+        cpu_loop_exit(cs);
+    }
+}
+
+#endif
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* Reduce the length so that addr + len doesn't cross a page boundary.  */
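+/* e.g. with 4 KiB pages, addr 0xffe and len 8 yield 2: the caller handles
+   the 2 bytes up to the page boundary and then retries with the rest */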
+static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
+{
+#ifndef CONFIG_USER_ONLY
+    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
+        return -addr & ~TARGET_PAGE_MASK;
+    }
+#endif
+    return len;
+}
+
+static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
+                        uint32_t l)
+{
+    int mmu_idx = cpu_mmu_index(env);
+
+    while (l > 0) {
+        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
+        if (p) {
+            /* Access to the whole page in write mode granted.  */
+            int l_adj = adj_len_to_page(l, dest);
+            memset(p, byte, l_adj);
+            dest += l_adj;
+            l -= l_adj;
+        } else {
+            /* We failed to get access to the whole page. The next write
+               access will likely fill the QEMU TLB for the next iteration.  */
+            cpu_stb_data(env, dest, byte);
+            dest++;
+            l--;
+        }
+    }
+}
+
+static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
+                         uint32_t l)
+{
+    int mmu_idx = cpu_mmu_index(env);
+
+    while (l > 0) {
+        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
+        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
+        if (src_p && dest_p) {
+            /* Access to both whole pages granted.  */
+            int l_adj = adj_len_to_page(l, src);
+            l_adj = adj_len_to_page(l_adj, dest);
+            memmove(dest_p, src_p, l_adj);
+            src += l_adj;
+            dest += l_adj;
+            l -= l_adj;
+        } else {
+            /* We failed to get access to one or both whole pages. The next
+               read or write access will likely fill the QEMU TLB for the
+               next iteration.  */
+            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
+            src++;
+            dest++;
+            l--;
+        }
+    }
+}
+
+/* and on array */
+uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+                    uint64_t src)
+{
+    int i;
+    unsigned char x;
+    uint32_t cc = 0;
+
+    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+               __func__, l, dest, src);
+    for (i = 0; i <= l; i++) {
+        x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
+        if (x) {
+            cc = 1;
+        }
+        cpu_stb_data(env, dest + i, x);
+    }
+    return cc;
+}
+
+/* xor on array */
+uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+                    uint64_t src)
+{
+    int i;
+    unsigned char x;
+    uint32_t cc = 0;
+
+    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+               __func__, l, dest, src);
+
+    /* xor with itself is the same as memset(0) */
+    if (src == dest) {
+        fast_memset(env, dest, 0, l + 1);
+        return 0;
+    }
+
+    for (i = 0; i <= l; i++) {
+        x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
+        if (x) {
+            cc = 1;
+        }
+        cpu_stb_data(env, dest + i, x);
+    }
+    return cc;
+}
+
+/* or on array */
+uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+                    uint64_t src)
+{
+    int i;
+    unsigned char x;
+    uint32_t cc = 0;
+
+    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+               __func__, l, dest, src);
+    for (i = 0; i <= l; i++) {
+        x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
+        if (x) {
+            cc = 1;
+        }
+        cpu_stb_data(env, dest + i, x);
+    }
+    return cc;
+}
+
+/* memmove */
+void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+    int i = 0;
+
+    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+               __func__, l, dest, src);
+
+    /* mvc with source pointing to the byte after the destination is the
+       same as memset with the first source byte */
+    if (dest == (src + 1)) {
+        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
+        return;
+    }
+
+    /* mvc and memmove do not behave the same when areas overlap! */
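+    /* a destructively overlapping forward copy must go byte by byte so
+       that earlier stores feed later loads, as the architecture demands */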
+    if ((dest < src) || (src + l < dest)) {
+        fast_memmove(env, dest, src, l + 1);
+        return;
+    }
+
+    /* slow version with byte accesses which always work */
+    for (i = 0; i <= l; i++) {
+        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
+    }
+}
+
+/* compare unsigned byte arrays */
+uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
+{
+    int i;
+    unsigned char x, y;
+    uint32_t cc;
+
+    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
+               __func__, l, s1, s2);
+    for (i = 0; i <= l; i++) {
+        x = cpu_ldub_data(env, s1 + i);
+        y = cpu_ldub_data(env, s2 + i);
+        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
+        if (x < y) {
+            cc = 1;
+            goto done;
+        } else if (x > y) {
+            cc = 2;
+            goto done;
+        }
+    }
+    cc = 0;
+ done:
+    HELPER_LOG("\n");
+    return cc;
+}
+
+/* compare logical under mask */
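+/* mask selects which bytes of the r1 word (tested left to right via bit
+   value 8) are compared with successive storage bytes at addr */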
+uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
+                     uint64_t addr)
+{
+    uint8_t r, d;
+    uint32_t cc;
+
+    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
+               mask, addr);
+    cc = 0;
+    while (mask) {
+        if (mask & 8) {
+            d = cpu_ldub_data(env, addr);
+            r = (r1 & 0xff000000UL) >> 24;
+            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
+                       addr);
+            if (r < d) {
+                cc = 1;
+                break;
+            } else if (r > d) {
+                cc = 2;
+                break;
+            }
+            addr++;
+        }
+        mask = (mask << 1) & 0xf;
+        r1 <<= 8;
+    }
+    HELPER_LOG("\n");
+    return cc;
+}
+
+static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
+{
+    /* 31-Bit mode */
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        a &= 0x7fffffff;
+    }
+    return a;
+}
+
+static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
+{
+    uint64_t r = d2;
+    if (x2) {
+        r += env->regs[x2];
+    }
+    if (b2) {
+        r += env->regs[b2];
+    }
+    return fix_address(env, r);
+}
+
+static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
+{
+    return fix_address(env, env->regs[reg]);
+}
+
+/* search string (c is the byte to search for, r2 is the string,
+   r1 the end of the string) */
+uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
+                      uint64_t str)
+{
+    uint32_t len;
+    uint8_t v, c = r0;
+
+    str = fix_address(env, str);
+    end = fix_address(env, end);
+
+    /* Assume for now that R2 is unmodified.  */
+    env->retxl = str;
+
+    /* Lest we fail to service interrupts in a timely manner, limit the
+       amount of work we're willing to do.  For now, let's cap at 8k.  */
+    for (len = 0; len < 0x2000; ++len) {
+        if (str + len == end) {
+            /* Character not found.  R1 & R2 are unmodified.  */
+            env->cc_op = 2;
+            return end;
+        }
+        v = cpu_ldub_data(env, str + len);
+        if (v == c) {
+            /* Character found.  Set R1 to the location; R2 is unmodified.  */
+            env->cc_op = 1;
+            return str + len;
+        }
+    }
+
+    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
+    env->retxl = str + len;
+    env->cc_op = 3;
+    return end;
+}
+
+/* unsigned string compare (c is string terminator) */
+uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
+{
+    uint32_t len;
+
+    c = c & 0xff;
+    s1 = fix_address(env, s1);
+    s2 = fix_address(env, s2);
+
+    /* Lest we fail to service interrupts in a timely manner, limit the
+       amount of work we're willing to do.  For now, let's cap at 8k.  */
+    for (len = 0; len < 0x2000; ++len) {
+        uint8_t v1 = cpu_ldub_data(env, s1 + len);
+        uint8_t v2 = cpu_ldub_data(env, s2 + len);
+        if (v1 == v2) {
+            if (v1 == c) {
+                /* Equal.  CC=0, and don't advance the registers.  */
+                env->cc_op = 0;
+                env->retxl = s2;
+                return s1;
+            }
+        } else {
+            /* Unequal.  CC={1,2}, and advance the registers.  Note that
+               the terminator need not be zero, but the string that contains
+               the terminator is by definition "low".  */
+            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
+            env->retxl = s2 + len;
+            return s1 + len;
+        }
+    }
+
+    /* CPU-determined bytes equal; advance the registers.  */
+    env->cc_op = 3;
+    env->retxl = s2 + len;
+    return s1 + len;
+}
+
+/* move page */
+void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
+{
+    /* XXX missing r0 handling */
+    env->cc_op = 0;
+    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
+}
+
+/* string copy (c is string terminator) */
+uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
+{
+    uint32_t len;
+
+    c = c & 0xff;
+    d = fix_address(env, d);
+    s = fix_address(env, s);
+
+    /* Lest we fail to service interrupts in a timely manner, limit the
+       amount of work we're willing to do.  For now, let's cap at 8k.  */
+    for (len = 0; len < 0x2000; ++len) {
+        uint8_t v = cpu_ldub_data(env, s + len);
+        cpu_stb_data(env, d + len, v);
+        if (v == c) {
+            /* Complete.  Set CC=1 and advance R1.  */
+            env->cc_op = 1;
+            env->retxl = s;
+            return d + len;
+        }
+    }
+
+    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
+    env->cc_op = 3;
+    env->retxl = s + len;
+    return d + len;
+}
+
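+/* insert characters under mask: for each mask bit (leftmost first), load
+   one byte and insert it into the corresponding byte of the low word of
+   register r1; cc is 1 if the first inserted bit is one, 2 if the first
+   bit is zero but some inserted byte is nonzero, else 0 */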
+static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
+                           uint32_t mask)
+{
+    int pos = 24; /* top of the lower half of r1 */
+    uint64_t rmask = 0xff000000ULL;
+    uint8_t val = 0;
+    int ccd = 0;
+    uint32_t cc = 0;
+
+    while (mask) {
+        if (mask & 8) {
+            env->regs[r1] &= ~rmask;
+            val = cpu_ldub_data(env, address);
+            if ((val & 0x80) && !ccd) {
+                cc = 1;
+            }
+            ccd = 1;
+            if (val && cc == 0) {
+                cc = 2;
+            }
+            env->regs[r1] |= (uint64_t)val << pos;
+            address++;
+        }
+        mask = (mask << 1) & 0xf;
+        pos -= 8;
+        rmask >>= 8;
+    }
+
+    return cc;
+}
+
+/* execute instruction:
+   this instruction executes an insn modified with the contents of r1.
+   It does not change the executed instruction in memory and it does not
+   change the program counter; in other words: tricky...
+   Currently implemented by interpreting the cases it is most commonly
+   used in.
+*/
+uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
+                    uint64_t addr, uint64_t ret)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    uint16_t insn = cpu_lduw_code(env, addr);
+
+    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
+               insn);
+    if ((insn & 0xf0ff) == 0xd000) {
+        uint32_t l, insn2, b1, b2, d1, d2;
+
+        l = v1 & 0xff;
+        insn2 = cpu_ldl_code(env, addr + 2);
+        b1 = (insn2 >> 28) & 0xf;
+        b2 = (insn2 >> 12) & 0xf;
+        d1 = (insn2 >> 16) & 0xfff;
+        d2 = insn2 & 0xfff;
+        switch (insn & 0xf00) {
+        case 0x200:
+            helper_mvc(env, l, get_address(env, 0, b1, d1),
+                       get_address(env, 0, b2, d2));
+            break;
+        case 0x400:
+            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
+                            get_address(env, 0, b2, d2));
+            break;
+        case 0x500:
+            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
+                            get_address(env, 0, b2, d2));
+            break;
+        case 0x600:
+            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
+                            get_address(env, 0, b2, d2));
+            break;
+        case 0x700:
+            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
+                           get_address(env, 0, b2, d2));
+            break;
+        case 0xc00:
+            helper_tr(env, l, get_address(env, 0, b1, d1),
+                      get_address(env, 0, b2, d2));
+            break;
+        case 0xd00:
+            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
+                            get_address(env, 0, b2, d2));
+            break;
+        default:
+            goto abort;
+        }
+    } else if ((insn & 0xff00) == 0x0a00) {
+        /* supervisor call */
+        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
+        env->psw.addr = ret - 4;
+        env->int_svc_code = (insn | v1) & 0xff;
+        env->int_svc_ilen = 4;
+        helper_exception(env, EXCP_SVC);
+    } else if ((insn & 0xff00) == 0xbf00) {
+        uint32_t insn2, r1, r3, b2, d2;
+
+        insn2 = cpu_ldl_code(env, addr + 2);
+        r1 = (insn2 >> 20) & 0xf;
+        r3 = (insn2 >> 16) & 0xf;
+        b2 = (insn2 >> 12) & 0xf;
+        d2 = insn2 & 0xfff;
+        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
+    } else {
+    abort:
+        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
+                  insn);
+    }
+    return cc;
+}
+
+/* load access registers r1 to r3 from memory at a2 */
+void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        env->aregs[i] = cpu_ldl_data(env, a2);
+        a2 += 4;
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+/* store access registers r1 to r3 in memory at a2 */
+void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        cpu_stl_data(env, a2, env->aregs[i]);
+        a2 += 4;
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+/* move long */
+uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+    uint64_t dest = get_address_31fix(env, r1);
+    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+    uint64_t src = get_address_31fix(env, r2);
+    uint8_t pad = env->regs[r2 + 1] >> 24;
+    uint8_t v;
+    uint32_t cc;
+
+    if (destlen == srclen) {
+        cc = 0;
+    } else if (destlen < srclen) {
+        cc = 1;
+    } else {
+        cc = 2;
+    }
+
+    if (srclen > destlen) {
+        srclen = destlen;
+    }
+
+    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+        v = cpu_ldub_data(env, src);
+        cpu_stb_data(env, dest, v);
+    }
+
+    for (; destlen; dest++, destlen--) {
+        cpu_stb_data(env, dest, pad);
+    }
+
+    env->regs[r1 + 1] = destlen;
+    /* can't use srclen here, we trunc'ed it */
+    env->regs[r2 + 1] -= src - env->regs[r2];
+    env->regs[r1] = dest;
+    env->regs[r2] = src;
+
+    return cc;
+}
+
+/* move long extended: another memcopy insn with more bells and whistles */
+uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+                       uint32_t r3)
+{
+    uint64_t destlen = env->regs[r1 + 1];
+    uint64_t dest = env->regs[r1];
+    uint64_t srclen = env->regs[r3 + 1];
+    uint64_t src = env->regs[r3];
+    uint8_t pad = a2 & 0xff;
+    uint8_t v;
+    uint32_t cc;
+
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        destlen = (uint32_t)destlen;
+        srclen = (uint32_t)srclen;
+        dest &= 0x7fffffff;
+        src &= 0x7fffffff;
+    }
+
+    if (destlen == srclen) {
+        cc = 0;
+    } else if (destlen < srclen) {
+        cc = 1;
+    } else {
+        cc = 2;
+    }
+
+    if (srclen > destlen) {
+        srclen = destlen;
+    }
+
+    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+        v = cpu_ldub_data(env, src);
+        cpu_stb_data(env, dest, v);
+    }
+
+    for (; destlen; dest++, destlen--) {
+        cpu_stb_data(env, dest, pad);
+    }
+
+    env->regs[r1 + 1] = destlen;
+    /* can't use srclen here, we trunc'ed it */
+    /* FIXME: 31-bit mode! */
+    env->regs[r3 + 1] -= src - env->regs[r3];
+    env->regs[r1] = dest;
+    env->regs[r3] = src;
+
+    return cc;
+}
+
+/* compare logical long extended: memcompare insn with padding */
+uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+                       uint32_t r3)
+{
+    uint64_t destlen = env->regs[r1 + 1];
+    uint64_t dest = get_address_31fix(env, r1);
+    uint64_t srclen = env->regs[r3 + 1];
+    uint64_t src = get_address_31fix(env, r3);
+    uint8_t pad = a2 & 0xff;
+    uint8_t v1 = 0, v2 = 0;
+    uint32_t cc = 0;
+
+    if (!(destlen || srclen)) {
+        return cc;
+    }
+
+    if (srclen > destlen) {
+        srclen = destlen;
+    }
+
+    while (destlen || srclen) {
+        v1 = srclen ? cpu_ldub_data(env, src) : pad;
+        v2 = destlen ? cpu_ldub_data(env, dest) : pad;
+        if (v1 != v2) {
+            cc = (v1 < v2) ? 1 : 2;
+            break;
+        }
+        /* only consume and advance an operand while its length remains;
+           an unguarded decrement would wrap the unsigned lengths around
+           zero once the shorter operand is exhausted */
+        if (srclen) {
+            srclen--;
+            src++;
+        }
+        if (destlen) {
+            destlen--;
+            dest++;
+        }
+    }
+
+    env->regs[r1 + 1] = destlen;
+    /* can't use srclen here, we trunc'ed it */
+    env->regs[r3 + 1] -= src - env->regs[r3];
+    env->regs[r1] = dest;
+    env->regs[r3] = src;
+
+    return cc;
+}
+
+/* checksum */
+uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
+                      uint64_t src, uint64_t src_len)
+{
+    uint64_t max_len, len;
+    uint64_t cksm = (uint32_t)r1;
+
+    /* Lest we fail to service interrupts in a timely manner, limit the
+       amount of work we're willing to do.  For now, let's cap at 8k.  */
+    max_len = (src_len > 0x2000 ? 0x2000 : src_len);
+
+    /* Process full words as available.  */
+    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
+        cksm += (uint32_t)cpu_ldl_data(env, src);
+    }
+
+    switch (max_len - len) {
+    case 1:
+        cksm += cpu_ldub_data(env, src) << 24;
+        len += 1;
+        break;
+    case 2:
+        cksm += cpu_lduw_data(env, src) << 16;
+        len += 2;
+        break;
+    case 3:
+        cksm += cpu_lduw_data(env, src) << 16;
+        cksm += cpu_ldub_data(env, src + 2) << 8;
+        len += 3;
+        break;
+    }
+
+    /* Fold the carry from the checksum.  Note that we can see carry-out
+       during folding more than once (but probably not more than twice).  */
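+    /* e.g. 0x123456789 folds to 0x23456789 + 1 = 0x2345678a */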
+    while (cksm > 0xffffffffull) {
+        cksm = (uint32_t)cksm + (cksm >> 32);
+    }
+
+    /* Indicate whether or not we've processed everything.  */
+    env->cc_op = (len == src_len ? 0 : 3);
+
+    /* Return both cksm and processed length.  */
+    env->retxl = cksm;
+    return len;
+}
+
+void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
+                  uint64_t src)
+{
+    int len_dest = len >> 4;
+    int len_src = len & 0xf;
+    uint8_t b;
+    int second_nibble = 0;
+
+    dest += len_dest;
+    src += len_src;
+
+    /* last byte is special, it only flips the nibbles */
+    b = cpu_ldub_data(env, src);
+    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
+    src--;
+    len_src--;
+
+    /* now pad every nibble with 0xf0 */
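+    /* e.g. packed 0x12 0x3c unpacks to zoned 0xf1 0xf2 0xc3: the rightmost
+       byte only has its nibbles swapped, every other digit gets the 0xf0
+       zone */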
+
+    while (len_dest > 0) {
+        uint8_t cur_byte = 0;
+
+        /* len_src tracks the index of the current source byte, so index 0
+           is still a valid digit byte */
+        if (len_src >= 0) {
+            cur_byte = cpu_ldub_data(env, src);
+        }
+
+        len_dest--;
+        dest--;
+
+        /* only advance one nibble at a time */
+        if (second_nibble) {
+            cur_byte >>= 4;
+            len_src--;
+            src--;
+        }
+        second_nibble = !second_nibble;
+
+        /* digit */
+        cur_byte = (cur_byte & 0xf);
+        /* zone bits */
+        cur_byte |= 0xf0;
+
+        cpu_stb_data(env, dest, cur_byte);
+    }
+}
+
+void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
+                uint64_t trans)
+{
+    int i;
+
+    for (i = 0; i <= len; i++) {
+        uint8_t byte = cpu_ldub_data(env, array + i);
+        uint8_t new_byte = cpu_ldub_data(env, trans + byte);
+
+        cpu_stb_data(env, array + i, new_byte);
+    }
+}
+
+uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
+                     uint64_t len, uint64_t trans)
+{
+    uint8_t end = env->regs[0] & 0xff;
+    uint64_t l = len;
+    uint64_t i;
+
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        array &= 0x7fffffff;
+        l = (uint32_t)l;
+    }
+
+    /* Lest we fail to service interrupts in a timely manner, limit the
+       amount of work we're willing to do.  For now, let's cap at 8k.  */
+    if (l > 0x2000) {
+        l = 0x2000;
+        env->cc_op = 3;
+    } else {
+        env->cc_op = 0;
+    }
+
+    for (i = 0; i < l; i++) {
+        uint8_t byte, new_byte;
+
+        byte = cpu_ldub_data(env, array + i);
+
+        if (byte == end) {
+            env->cc_op = 1;
+            break;
+        }
+
+        new_byte = cpu_ldub_data(env, trans + byte);
+        cpu_stb_data(env, array + i, new_byte);
+    }
+
+    env->retxl = len - i;
+    return array + i;
+}
+
+uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
+                     uint64_t trans)
+{
+    uint32_t cc = 0;
+    int i;
+
+    for (i = 0; i <= len; i++) {
+        uint8_t byte = cpu_ldub_data(env, array + i);
+        uint8_t sbyte = cpu_ldub_data(env, trans + byte);
+
+        if (sbyte != 0) {
+            env->regs[1] = array + i;
+            env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
+            cc = (i == len) ? 2 : 1;
+            break;
+        }
+    }
+
+    return cc;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    bool PERchanged = false;
+    int i;
+    uint64_t src = a2;
+    uint64_t val;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        val = cpu_ldq_data(env, src);
+        if (env->cregs[i] != val && i >= 9 && i <= 11) {
+            PERchanged = true;
+        }
+        env->cregs[i] = val;
+        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
+                   i, src, env->cregs[i]);
+        src += sizeof(uint64_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+
+    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+        s390_cpu_recompute_watchpoints(CPU(cpu));
+    }
+
+    tlb_flush(CPU(cpu), 1);
+}
+
+void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    bool PERchanged = false;
+    int i;
+    uint64_t src = a2;
+    uint32_t val;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        val = cpu_ldl_data(env, src);
+        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
+            PERchanged = true;
+        }
+        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
+        src += sizeof(uint32_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+
+    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+        s390_cpu_recompute_watchpoints(CPU(cpu));
+    }
+
+    tlb_flush(CPU(cpu), 1);
+}
+
+void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+    uint64_t dest = a2;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        cpu_stq_data(env, dest, env->cregs[i]);
+        dest += sizeof(uint64_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    int i;
+    uint64_t dest = a2;
+
+    for (i = r1;; i = (i + 1) % 16) {
+        cpu_stl_data(env, dest, env->cregs[i]);
+        dest += sizeof(uint32_t);
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
+{
+    /* XXX implement */
+
+    return 0;
+}
+
+/* insert storage key extended */
+uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
+{
+    uint64_t addr = get_address(env, 0, 0, r2);
+
+    if (addr > ram_size) {
+        return 0;
+    }
+
+    return env->storage_keys[addr / TARGET_PAGE_SIZE];
+}
+
+/* set storage key extended */
+void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+{
+    uint64_t addr = get_address(env, 0, 0, r2);
+
+    if (addr > ram_size) {
+        return;
+    }
+
+    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
+}
+
+/* reset reference bit extended */
+uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
+{
+    uint8_t re;
+    uint8_t key;
+
+    if (r2 > ram_size) {
+        return 0;
+    }
+
+    key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
+    re = key & (SK_R | SK_C);
+    env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
+
+    /*
+     * cc
+     *
+     * 0  Reference bit zero; change bit zero
+     * 1  Reference bit zero; change bit one
+     * 2  Reference bit one; change bit zero
+     * 3  Reference bit one; change bit one
+     */
+
+    return re >> 1;
+}
+
+/* compare and swap and purge */
+uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    uint32_t cc;
+    uint32_t o1 = env->regs[r1];
+    uint64_t a2 = r2 & ~3ULL;
+    uint32_t o2 = cpu_ldl_data(env, a2);
+
+    if (o1 == o2) {
+        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
+        if (r2 & 0x3) {
+            /* flush TLB / ALB */
+            tlb_flush(CPU(cpu), 1);
+        }
+        cc = 0;
+    } else {
+        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
+        cc = 1;
+    }
+
+    return cc;
+}
+
+uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+{
+    int cc = 0, i;
+
+    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+               __func__, l, a1, a2);
+
+    if (l > 256) {
+        /* max 256 */
+        l = 256;
+        cc = 3;
+    }
+
+    /* XXX replace w/ memcpy */
+    for (i = 0; i < l; i++) {
+        cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
+    }
+
+    return cc;
+}
+
+uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+{
+    int cc = 0, i;
+
+    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+               __func__, l, a1, a2);
+
+    if (l > 256) {
+        /* max 256 */
+        l = 256;
+        cc = 3;
+    }
+
+    /* XXX replace w/ memcpy */
+    for (i = 0; i < l; i++) {
+        cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
+    }
+
+    return cc;
+}
+
+/* invalidate pte */
+void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+    uint64_t page = vaddr & TARGET_PAGE_MASK;
+    uint64_t pte = 0;
+
+    /* XXX broadcast to other CPUs */
+
+    /* XXX Linux is nice enough to give us the exact pte address.
+       According to spec we'd have to find it out ourselves */
+    /* XXX Linux is fine with overwriting the pte, the spec requires
+       us to only set the invalid bit */
+    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
+
+    /* XXX we exploit the fact that Linux passes the exact virtual
+       address here - it's not obliged to! */
+    tlb_flush_page(cs, page);
+
+    /* XXX 31-bit hack */
+    if (page & 0x80000000) {
+        tlb_flush_page(cs, page & ~0x80000000);
+    } else {
+        tlb_flush_page(cs, page | 0x80000000);
+    }
+}
+
+/* flush local tlb */
+void HELPER(ptlb)(CPUS390XState *env)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+
+    tlb_flush(CPU(cpu), 1);
+}
+
+/* load using real address */
+uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+
+    return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
+}
+
+uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+
+    return ldq_phys(cs->as, get_address(env, 0, 0, addr));
+}
+
+/* store using real address */
+void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+
+    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
+
+    if ((env->psw.mask & PSW_MASK_PER) &&
+        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
+        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
+        /* PSW is saved just before calling the helper.  */
+        env->per_address = env->psw.addr;
+        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
+    }
+}
+
+void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+
+    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);
+
+    if ((env->psw.mask & PSW_MASK_PER) &&
+        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
+        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
+        /* PSW is saved just before calling the helper.  */
+        env->per_address = env->psw.addr;
+        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
+    }
+}
+
+/* load real address */
+uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+    uint32_t cc = 0;
+    int old_exc = cs->exception_index;
+    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+    uint64_t ret;
+    int flags;
+
+    /* XXX incomplete - has more corner cases */
+    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+        program_interrupt(env, PGM_SPECIAL_OP, 2);
+    }
+
+    cs->exception_index = old_exc;
+    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
+        cc = 3;
+    }
+    if (cs->exception_index == EXCP_PGM) {
+        ret = env->int_pgm_code | 0x80000000;
+    } else {
+        ret |= addr & ~TARGET_PAGE_MASK;
+    }
+    cs->exception_index = old_exc;
+
+    env->cc_op = cc;
+    return ret;
+}
+#endif
diff --git a/qemu/target-s390x/misc_helper.c b/qemu/target-s390x/misc_helper.c
new file mode 100644
index 000000000..8eac0e12b
--- /dev/null
+++ b/qemu/target-s390x/misc_helper.c
@@ -0,0 +1,642 @@
+/*
+ *  S/390 misc helper routines
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *  Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/memory.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include <string.h>
+#include "sysemu/kvm.h"
+#include "qemu/timer.h"
+#include "exec/address-spaces.h"
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#endif
+#include "exec/cpu_ldst.h"
+#include "hw/watchdog/wdt_diag288.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "sysemu/cpus.h"
+#include "sysemu/sysemu.h"
+#include "hw/s390x/ebcdic.h"
+#include "hw/s390x/ipl.h"
+#endif
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* Raise an exception dynamically from a helper function.  */
+void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
+                                     uintptr_t retaddr)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+    int t;
+
+    cs->exception_index = EXCP_PGM;
+    env->int_pgm_code = excp;
+
+    /* Use the (ultimate) caller's address to find the insn that trapped.  */
+    cpu_restore_state(cs, retaddr);
+
+    /* Advance past the insn.  */
+    t = cpu_ldub_code(env, env->psw.addr);
+    env->int_pgm_ilen = t = get_ilen(t);
+    env->psw.addr += t;
+
+    cpu_loop_exit(cs);
+}
+
+/* Raise an exception statically from a TB.  */
+void HELPER(exception)(CPUS390XState *env, uint32_t excp)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+
+    HELPER_LOG("%s: exception %d\n", __func__, excp);
+    cs->exception_index = excp;
+    cpu_loop_exit(cs);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+
+    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
+                  env->psw.addr);
+
+    if (kvm_enabled()) {
+#ifdef CONFIG_KVM
+        struct kvm_s390_irq irq = {
+            .type = KVM_S390_PROGRAM_INT,
+            .u.pgm.code = code,
+        };
+
+        kvm_s390_vcpu_interrupt(cpu, &irq);
+#endif
+    } else {
+        CPUState *cs = CPU(cpu);
+
+        env->int_pgm_code = code;
+        env->int_pgm_ilen = ilen;
+        cs->exception_index = EXCP_PGM;
+        cpu_loop_exit(cs);
+    }
+}
+
+/* SCLP service call */
+uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+{
+    int r = sclp_service_call(env, r1, r2);
+    if (r < 0) {
+        program_interrupt(env, -r, 4);
+        return 0;
+    }
+    return r;
+}
+
+#ifndef CONFIG_USER_ONLY
+static int modified_clear_reset(S390CPU *cpu)
+{
+    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+    CPUState *t;
+
+    pause_all_vcpus();
+    cpu_synchronize_all_states();
+    CPU_FOREACH(t) {
+        run_on_cpu(t, s390_do_cpu_full_reset, t);
+    }
+    cmma_reset(cpu);
+    io_subsystem_reset();
+    scc->load_normal(CPU(cpu));
+    cpu_synchronize_all_post_reset();
+    resume_all_vcpus();
+    return 0;
+}
+
+static int load_normal_reset(S390CPU *cpu)
+{
+    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+    CPUState *t;
+
+    pause_all_vcpus();
+    cpu_synchronize_all_states();
+    CPU_FOREACH(t) {
+        run_on_cpu(t, s390_do_cpu_reset, t);
+    }
+    cmma_reset(cpu);
+    io_subsystem_reset();
+    scc->initial_cpu_reset(CPU(cpu));
+    scc->load_normal(CPU(cpu));
+    cpu_synchronize_all_post_reset();
+    resume_all_vcpus();
+    return 0;
+}
+
+int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
+{
+    uint64_t func = env->regs[r1];
+    uint64_t timeout = env->regs[r1 + 1];
+    uint64_t action = env->regs[r3];
+    Object *obj;
+    DIAG288State *diag288;
+    DIAG288Class *diag288_class;
+
+    if (r1 % 2 || action != 0) {
+        return -1;
+    }
+
+    /* The timeout must be at least 15 seconds, except for timer deletion */
+    if (func != WDT_DIAG288_CANCEL && timeout < 15) {
+        return -1;
+    }
+
+    obj = object_resolve_path_type("", TYPE_WDT_DIAG288, NULL);
+    if (!obj) {
+        return -1;
+    }
+
+    diag288 = DIAG288(obj);
+    diag288_class = DIAG288_GET_CLASS(diag288);
+    return diag288_class->handle_timer(diag288, func, timeout);
+}
+
+#define DIAG_308_RC_OK              0x0001
+#define DIAG_308_RC_NO_CONF         0x0102
+#define DIAG_308_RC_INVALID         0x0402
+
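+/* diag 308 subcodes: 0 = modified clear reset, 1 = load normal reset,
+   3 = re-IPL request, 5 = set IPL parameter block, 6 = store IPL
+   parameter block */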
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
+{
+    uint64_t addr =  env->regs[r1];
+    uint64_t subcode = env->regs[r3];
+    IplParameterBlock *iplb;
+
+    if (env->psw.mask & PSW_MASK_PSTATE) {
+        program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
+        return;
+    }
+
+    if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
+        program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
+        return;
+    }
+
+    switch (subcode) {
+    case 0:
+        modified_clear_reset(s390_env_get_cpu(env));
+        if (tcg_enabled()) {
+            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+        }
+        break;
+    case 1:
+        load_normal_reset(s390_env_get_cpu(env));
+        if (tcg_enabled()) {
+            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+        }
+        break;
+    case 3:
+        s390_reipl_request();
+        if (tcg_enabled()) {
+            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+        }
+        break;
+    case 5:
+        if ((r1 & 1) || (addr & 0x0fffULL)) {
+            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
+            return;
+        }
+        if (!address_space_access_valid(&address_space_memory, addr,
+                                        sizeof(IplParameterBlock), false)) {
+            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
+            return;
+        }
+        iplb = g_malloc0(sizeof(struct IplParameterBlock));
+        cpu_physical_memory_read(addr, iplb, sizeof(struct IplParameterBlock));
+        if (!s390_ipl_update_diag308(iplb)) {
+            env->regs[r1 + 1] = DIAG_308_RC_OK;
+        } else {
+            env->regs[r1 + 1] = DIAG_308_RC_INVALID;
+        }
+        g_free(iplb);
+        return;
+    case 6:
+        if ((r1 & 1) || (addr & 0x0fffULL)) {
+            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
+            return;
+        }
+        if (!address_space_access_valid(&address_space_memory, addr,
+                                        sizeof(IplParameterBlock), true)) {
+            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
+            return;
+        }
+        iplb = s390_ipl_get_iplb();
+        if (iplb) {
+            cpu_physical_memory_write(addr, iplb,
+                                      sizeof(struct IplParameterBlock));
+            env->regs[r1 + 1] = DIAG_308_RC_OK;
+        } else {
+            env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
+        }
+        return;
+    default:
+        hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
+        break;
+    }
+}
+#endif
+
+void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
+{
+    uint64_t r;
+
+    switch (num) {
+    case 0x500:
+        /* KVM hypercall */
+        r = s390_virtio_hypercall(env);
+        break;
+    case 0x44:
+        /* yield */
+        r = 0;
+        break;
+    case 0x308:
+        /* ipl */
+        handle_diag_308(env, r1, r3);
+        r = 0;
+        break;
+    default:
+        r = -1;
+        break;
+    }
+
+    if (r) {
+        program_interrupt(env, PGM_OPERATION, ILEN_LATER_INC);
+    }
+}
+
+/* Set Prefix */
+void HELPER(spx)(CPUS390XState *env, uint64_t a1)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+    uint32_t prefix = a1 & 0x7fffe000;
+
+    env->psa = prefix;
+    qemu_log("prefix: %#x\n", prefix);
+    tlb_flush_page(cs, 0);
+    tlb_flush_page(cs, TARGET_PAGE_SIZE);
+}
+
+/* Store Clock */
+uint64_t HELPER(stck)(CPUS390XState *env)
+{
+    uint64_t time;
+
+    time = env->tod_offset +
+        time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);
+
+    return time;
+}
+
+/* Set Clock Comparator */
+void HELPER(sckc)(CPUS390XState *env, uint64_t time)
+{
+    if (time == -1ULL) {
+        return;
+    }
+
+    env->ckc = time;
+
+    /* difference between origins */
+    time -= env->tod_offset;
+
+    /* nanoseconds */
+    time = tod2time(time);
+
+    timer_mod(env->tod_timer, env->tod_basetime + time);
+}
+
+/* Store Clock Comparator */
+uint64_t HELPER(stckc)(CPUS390XState *env)
+{
+    return env->ckc;
+}
+
+/* Set CPU Timer */
+void HELPER(spt)(CPUS390XState *env, uint64_t time)
+{
+    if (time == -1ULL) {
+        return;
+    }
+
+    /* nanoseconds */
+    time = tod2time(time);
+
+    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;
+
+    timer_mod(env->cpu_timer, env->cputm);
+}
+
+/* Store CPU Timer */
+uint64_t HELPER(stpt)(CPUS390XState *env)
+{
+    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+/* Store System Information */
+uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
+                      uint64_t r0, uint64_t r1)
+{
+    int cc = 0;
+    int sel1, sel2;
+
+    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
+        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
+        /* valid function code, invalid reserved bits */
+        program_interrupt(env, PGM_SPECIFICATION, 2);
+    }
+
+    sel1 = r0 & STSI_R0_SEL1_MASK;
+    sel2 = r1 & STSI_R1_SEL2_MASK;
+
+    /* XXX: spec exception if sysib is not 4k-aligned */
+
+    switch (r0 & STSI_LEVEL_MASK) {
+    case STSI_LEVEL_1:
+        if ((sel1 == 1) && (sel2 == 1)) {
+            /* Basic Machine Configuration */
+            struct sysib_111 sysib;
+
+            memset(&sysib, 0, sizeof(sysib));
+            ebcdic_put(sysib.manuf, "QEMU            ", 16);
+            /* same as machine type number in STORE CPU ID */
+            ebcdic_put(sysib.type, "QEMU", 4);
+            /* same as model number in STORE CPU ID */
+            ebcdic_put(sysib.model, "QEMU            ", 16);
+            ebcdic_put(sysib.sequence, "QEMU            ", 16);
+            ebcdic_put(sysib.plant, "QEMU", 4);
+            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+        } else if ((sel1 == 2) && (sel2 == 1)) {
+            /* Basic Machine CPU */
+            struct sysib_121 sysib;
+
+            memset(&sysib, 0, sizeof(sysib));
+            /* XXX make different for different CPUs? */
+            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
+            ebcdic_put(sysib.plant, "QEMU", 4);
+            stw_p(&sysib.cpu_addr, env->cpu_num);
+            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+        } else if ((sel1 == 2) && (sel2 == 2)) {
+            /* Basic Machine CPUs */
+            struct sysib_122 sysib;
+
+            memset(&sysib, 0, sizeof(sysib));
+            stl_p(&sysib.capability, 0x443afc29);
+            /* XXX change when SMP comes */
+            stw_p(&sysib.total_cpus, 1);
+            stw_p(&sysib.active_cpus, 1);
+            stw_p(&sysib.standby_cpus, 0);
+            stw_p(&sysib.reserved_cpus, 0);
+            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+        } else {
+            cc = 3;
+        }
+        break;
+    case STSI_LEVEL_2:
+        {
+            if ((sel1 == 2) && (sel2 == 1)) {
+                /* LPAR CPU */
+                struct sysib_221 sysib;
+
+                memset(&sysib, 0, sizeof(sysib));
+                /* XXX make different for different CPUs? */
+                ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
+                ebcdic_put(sysib.plant, "QEMU", 4);
+                stw_p(&sysib.cpu_addr, env->cpu_num);
+                stw_p(&sysib.cpu_id, 0);
+                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+            } else if ((sel1 == 2) && (sel2 == 2)) {
+                /* LPAR CPUs */
+                struct sysib_222 sysib;
+
+                memset(&sysib, 0, sizeof(sysib));
+                stw_p(&sysib.lpar_num, 0);
+                sysib.lcpuc = 0;
+                /* XXX change when SMP comes */
+                stw_p(&sysib.total_cpus, 1);
+                stw_p(&sysib.conf_cpus, 1);
+                stw_p(&sysib.standby_cpus, 0);
+                stw_p(&sysib.reserved_cpus, 0);
+                ebcdic_put(sysib.name, "QEMU    ", 8);
+                stl_p(&sysib.caf, 1000);
+                stw_p(&sysib.dedicated_cpus, 0);
+                stw_p(&sysib.shared_cpus, 0);
+                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+            } else {
+                cc = 3;
+            }
+            break;
+        }
+    case STSI_LEVEL_3:
+        {
+            if ((sel1 == 2) && (sel2 == 2)) {
+                /* VM CPUs */
+                struct sysib_322 sysib;
+
+                memset(&sysib, 0, sizeof(sysib));
+                sysib.count = 1;
+                /* XXX change when SMP comes */
+                stw_p(&sysib.vm[0].total_cpus, 1);
+                stw_p(&sysib.vm[0].conf_cpus, 1);
+                stw_p(&sysib.vm[0].standby_cpus, 0);
+                stw_p(&sysib.vm[0].reserved_cpus, 0);
+                ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
+                stl_p(&sysib.vm[0].caf, 1000);
+                ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
+                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+            } else {
+                cc = 3;
+            }
+            break;
+        }
+    case STSI_LEVEL_CURRENT:
+        env->regs[0] = STSI_LEVEL_3;
+        break;
+    default:
+        cc = 3;
+        break;
+    }
+
+    return cc;
+}
+
+uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
+                      uint64_t cpu_addr)
+{
+    int cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+
+    HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
+               __func__, order_code, r1, cpu_addr);
+
+    /* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register"
+       as parameter (input). Status (output) is always R1. */
+
+    switch (order_code) {
+    case SIGP_SET_ARCH:
+        /* switch arch */
+        break;
+    case SIGP_SENSE:
+        /* enumerate CPU status */
+        if (cpu_addr) {
+            /* XXX implement when SMP comes */
+            return SIGP_CC_NOT_OPERATIONAL;
+        }
+        env->regs[r1] &= 0xffffffff00000000ULL;
+        cc = 1;
+        break;
+#if !defined(CONFIG_USER_ONLY)
+    case SIGP_RESTART:
+        qemu_system_reset_request();
+        cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+        break;
+    case SIGP_STOP:
+        qemu_system_shutdown_request();
+        cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+        break;
+#endif
+    default:
+        /* unknown sigp */
+        fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
+        cc = SIGP_CC_NOT_OPERATIONAL;
+    }
+
+    return cc;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_xsch(cpu, r1);
+}
+
+void HELPER(csch)(CPUS390XState *env, uint64_t r1)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_csch(cpu, r1);
+}
+
+void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_hsch(cpu, r1);
+}
+
+void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_msch(cpu, r1, inst >> 16);
+}
+
+void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_rchp(cpu, r1);
+}
+
+void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_rsch(cpu, r1);
+}
+
+void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_ssch(cpu, r1, inst >> 16);
+}
+
+void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_stsch(cpu, r1, inst >> 16);
+}
+
+void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_tsch(cpu, r1, inst >> 16);
+}
+
+void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+    ioinst_handle_chsc(cpu, inst >> 16);
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(per_check_exception)(CPUS390XState *env)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+
+    if (env->per_perc_atmid) {
+        env->int_pgm_code = PGM_PER;
+        env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, env->per_address));
+
+        cs->exception_index = EXCP_PGM;
+        cpu_loop_exit(cs);
+    }
+}
+
+void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
+{
+    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
+        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
+            || get_per_in_range(env, to)) {
+            env->per_address = from;
+            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
+        }
+    }
+}
+
+void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
+{
+    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
+        env->per_address = addr;
+        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);
+
+        /* If the instruction has to be nullified, trigger the
+           exception immediately. */
+        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
+            CPUState *cs = CPU(s390_env_get_cpu(env));
+
+            env->int_pgm_code = PGM_PER;
+            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));
+
+            cs->exception_index = EXCP_PGM;
+            cpu_loop_exit(cs);
+        }
+    }
+}
+#endif
diff --git a/qemu/target-s390x/mmu_helper.c b/qemu/target-s390x/mmu_helper.c
new file mode 100644
index 000000000..1ea6d812c
--- /dev/null
+++ b/qemu/target-s390x/mmu_helper.c
@@ -0,0 +1,480 @@
+/*
+ * S390x MMU related functions
+ *
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2015 Thomas Huth, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "qemu/error-report.h"
+#include "exec/address-spaces.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+
+/* #define DEBUG_S390 */
+/* #define DEBUG_S390_PTE */
+/* #define DEBUG_S390_STDOUT */
+
+#ifdef DEBUG_S390
+#ifdef DEBUG_S390_STDOUT
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
+         qemu_log(fmt, ##__VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
+#endif
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+#ifdef DEBUG_S390_PTE
+#define PTE_DPRINTF DPRINTF
+#else
+#define PTE_DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+/* Fetch/store bits in the translation exception code: */
+#define FS_READ  0x800
+#define FS_WRITE 0x400
+
+static void trigger_access_exception(CPUS390XState *env, uint32_t type,
+                                     uint32_t ilen, uint64_t tec)
+{
+    S390CPU *cpu = s390_env_get_cpu(env);
+
+    if (kvm_enabled()) {
+        kvm_s390_access_exception(cpu, type, tec);
+    } else {
+        CPUState *cs = CPU(cpu);
+        stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
+        trigger_pgm_exception(env, type, ilen);
+    }
+}
+
+static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
+                               uint64_t asc, int rw, bool exc)
+{
+    uint64_t tec;
+
+    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;
+
+    DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
+
+    if (!exc) {
+        return;
+    }
+
+    trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, tec);
+}
+
+static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
+                               uint32_t type, uint64_t asc, int rw, bool exc)
+{
+    int ilen = ILEN_LATER;
+    uint64_t tec;
+
+    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;
+
+    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
+
+    if (!exc) {
+        return;
+    }
+
+    /* Code accesses have an undefined ilc.  */
+    if (rw == MMU_INST_FETCH) {
+        ilen = 2;
+    }
+
+    trigger_access_exception(env, type, ilen, tec);
+}
+
+/**
+ * Translate real address to absolute (= physical)
+ * address by taking care of the prefix mapping.
+ */
+static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
+{
+    if (raddr < 0x2000) {
+        return raddr + env->psa;    /* Map the lowcore. */
+    } else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
+        return raddr - env->psa;    /* Map the 0 page. */
+    }
+    return raddr;
+}
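+
+/*
+ * For example, with env->psa == 0x20000: real address 0x1000 (inside the
+ * 8KB lowcore window) maps to absolute 0x21000, real 0x20800 maps back to
+ * absolute 0x800, and any other address, e.g. 0x50000, passes through
+ * unchanged.
+ */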
+
+/* Decode page table entry (normal 4KB page) */
+static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
+                             uint64_t asc, uint64_t pt_entry,
+                             target_ulong *raddr, int *flags, int rw, bool exc)
+{
+    if (pt_entry & _PAGE_INVALID) {
+        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
+        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
+        return -1;
+    }
+    if (pt_entry & _PAGE_RES0) {
+        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
+        return -1;
+    }
+    if (pt_entry & _PAGE_RO) {
+        *flags &= ~PAGE_WRITE;
+    }
+
+    *raddr = pt_entry & _ASCE_ORIGIN;
+
+    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
+
+    return 0;
+}
+
+#define VADDR_PX    0xff000         /* Page index bits */
+
+/* Decode segment table entry */
+static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
+                                 uint64_t asc, uint64_t st_entry,
+                                 target_ulong *raddr, int *flags, int rw,
+                                 bool exc)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+    uint64_t origin, offs, pt_entry;
+
+    if (st_entry & _SEGMENT_ENTRY_RO) {
+        *flags &= ~PAGE_WRITE;
+    }
+
+    if ((st_entry & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
+        /* Decode EDAT1 segment frame absolute address (1MB page) */
+        *raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
+        PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
+        return 0;
+    }
+
+    /* Look up 4KB page entry */
+    origin = st_entry & _SEGMENT_ENTRY_ORIGIN;
+    offs  = (vaddr & VADDR_PX) >> 9;
+    pt_entry = ldq_phys(cs->as, origin + offs);
+    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
+                __func__, origin, offs, pt_entry);
+    return mmu_translate_pte(env, vaddr, asc, pt_entry, raddr, flags, rw, exc);
+}
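+
+/*
+ * Worked example for the 4KB lookup above: for vaddr 0x7654321 the page
+ * index is (0x7654321 & VADDR_PX) >> 9 == 0x2a0, i.e. PTE number 0x54 at
+ * eight bytes per entry, so the PTE is fetched from origin + 0x2a0.
+ */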
+
+/* Decode region table entries */
+static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
+                                uint64_t asc, uint64_t entry, int level,
+                                target_ulong *raddr, int *flags, int rw,
+                                bool exc)
+{
+    CPUState *cs = CPU(s390_env_get_cpu(env));
+    uint64_t origin, offs, new_entry;
+    const int pchks[4] = {
+        PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
+        PGM_REG_SEC_TRANS, PGM_REG_FIRST_TRANS
+    };
+
+    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);
+
+    origin = entry & _REGION_ENTRY_ORIGIN;
+    offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;
+
+    new_entry = ldq_phys(cs->as, origin + offs);
+    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
+                __func__, origin, offs, new_entry);
+
+    if ((new_entry & _REGION_ENTRY_INV) != 0) {
+        DPRINTF("%s: invalid region\n", __func__);
+        trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
+        return -1;
+    }
+
+    if ((new_entry & _REGION_ENTRY_TYPE_MASK) != level) {
+        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
+        return -1;
+    }
+
+    if (level == _ASCE_TYPE_SEGMENT) {
+        return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
+                                     rw, exc);
+    }
+
+    /* Check region table offset and length */
+    offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
+    if (offs < ((new_entry & _REGION_ENTRY_TF) >> 6)
+        || offs > (new_entry & _REGION_ENTRY_LENGTH)) {
+        DPRINTF("%s: invalid offset or len (%lx)\n", __func__, new_entry);
+        trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
+        return -1;
+    }
+
+    if ((env->cregs[0] & CR0_EDAT) && (new_entry & _REGION_ENTRY_RO)) {
+        *flags &= ~PAGE_WRITE;
+    }
+
+    /* yet another region */
+    return mmu_translate_region(env, vaddr, asc, new_entry, level - 4,
+                                raddr, flags, rw, exc);
+}
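+
+/*
+ * Worked example for the offset computation above: at segment-table level
+ * (_ASCE_TYPE_SEGMENT, giving a shift of 17), vaddr 0x7654321 yields
+ * offs == (0x7654321 >> 17) & 0x3ff8 == 0x3b0, i.e. segment index 0x76
+ * times eight bytes per entry.
+ */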
+
+static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
+                              uint64_t asc, uint64_t asce, target_ulong *raddr,
+                              int *flags, int rw, bool exc)
+{
+    int level;
+    int r;
+
+    if (asce & _ASCE_REAL_SPACE) {
+        /* direct mapping */
+        *raddr = vaddr;
+        return 0;
+    }
+
+    level = asce & _ASCE_TYPE_MASK;
+    switch (level) {
+    case _ASCE_TYPE_REGION1:
+        if ((vaddr >> 62) > (asce & _ASCE_TABLE_LENGTH)) {
+            trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
+            return -1;
+        }
+        break;
+    case _ASCE_TYPE_REGION2:
+        if (vaddr & 0xffe0000000000000ULL) {
+            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+                    " 0xffe0000000000000ULL\n", __func__, vaddr);
+            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
+            return -1;
+        }
+        if ((vaddr >> 51 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+            trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
+            return -1;
+        }
+        break;
+    case _ASCE_TYPE_REGION3:
+        if (vaddr & 0xfffffc0000000000ULL) {
+            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+                    " 0xfffffc0000000000ULL\n", __func__, vaddr);
+            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
+            return -1;
+        }
+        if ((vaddr >> 40 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+            trigger_page_fault(env, vaddr, PGM_REG_THIRD_TRANS, asc, rw, exc);
+            return -1;
+        }
+        break;
+    case _ASCE_TYPE_SEGMENT:
+        if (vaddr & 0xffffffff80000000ULL) {
+            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+                    " 0xffffffff80000000ULL\n", __func__, vaddr);
+            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
+            return -1;
+        }
+        if ((vaddr >> 29 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+            trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw, exc);
+            return -1;
+        }
+        break;
+    }
+
+    r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
+                             exc);
+    if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
+        trigger_prot_fault(env, vaddr, asc, rw, exc);
+        return -1;
+    }
+
+    return r;
+}
+
+/**
+ * Translate a virtual (logical) address into a physical (absolute) address.
+ * @param vaddr  the virtual address
+ * @param rw     0 = read, 1 = write, 2 = code fetch
+ * @param asc    address space control (one of the PSW_ASC_* modes)
+ * @param raddr  the translated address is stored to this pointer
+ * @param flags  the PAGE_READ/WRITE/EXEC flags are stored to this pointer
+ * @param exc    true = inject a program check if a fault occurred
+ * @return       0 if the translation was successful, -1 if a fault occurred
+ */
+int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
+                  target_ulong *raddr, int *flags, bool exc)
+{
+    int r = -1;
+    uint8_t *sk;
+
+    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    vaddr &= TARGET_PAGE_MASK;
+
+    if (!(env->psw.mask & PSW_MASK_DAT)) {
+        *raddr = vaddr;
+        r = 0;
+        goto out;
+    }
+
+    switch (asc) {
+    case PSW_ASC_PRIMARY:
+        PTE_DPRINTF("%s: asc=primary\n", __func__);
+        r = mmu_translate_asce(env, vaddr, asc, env->cregs[1], raddr, flags,
+                               rw, exc);
+        break;
+    case PSW_ASC_HOME:
+        PTE_DPRINTF("%s: asc=home\n", __func__);
+        r = mmu_translate_asce(env, vaddr, asc, env->cregs[13], raddr, flags,
+                               rw, exc);
+        break;
+    case PSW_ASC_SECONDARY:
+        PTE_DPRINTF("%s: asc=secondary\n", __func__);
+        /*
+         * Instruction: Primary
+         * Data: Secondary
+         */
+        if (rw == MMU_INST_FETCH) {
+            r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1],
+                                   raddr, flags, rw, exc);
+            *flags &= ~(PAGE_READ | PAGE_WRITE);
+        } else {
+            r = mmu_translate_asce(env, vaddr, PSW_ASC_SECONDARY, env->cregs[7],
+                                   raddr, flags, rw, exc);
+            *flags &= ~(PAGE_EXEC);
+        }
+        break;
+    case PSW_ASC_ACCREG:
+    default:
+        hw_error("guest switched to unknown asc mode\n");
+        break;
+    }
+
+ out:
+    /* Convert real address -> absolute address */
+    *raddr = mmu_real2abs(env, *raddr);
+
+    if (*raddr < ram_size) {
+        sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE];
+        if (*flags & PAGE_READ) {
+            *sk |= SK_R;
+        }
+
+        if (*flags & PAGE_WRITE) {
+            *sk |= SK_C;
+        }
+    }
+
+    return r;
+}
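+
+/*
+ * Minimal usage sketch (hypothetical caller, for illustration): translate
+ * a logical address for a plain read without injecting a fault on error,
+ * assuming MMU_DATA_LOAD for the rw argument (access type 0, "read"):
+ *
+ *     target_ulong raddr;
+ *     int flags;
+ *     uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ *
+ *     if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc,
+ *                       &raddr, &flags, false) == 0) {
+ *         ... raddr now holds the absolute address ...
+ *     }
+ */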
+
+/**
+ * lowprot_enabled: Check whether low-address protection is enabled
+ */
+static bool lowprot_enabled(const CPUS390XState *env)
+{
+    if (!(env->cregs[0] & CR0_LOWPROT)) {
+        return false;
+    }
+    if (!(env->psw.mask & PSW_MASK_DAT)) {
+        return true;
+    }
+
+    /* Check the private-space control bit */
+    switch (env->psw.mask & PSW_MASK_ASC) {
+    case PSW_ASC_PRIMARY:
+        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
+    case PSW_ASC_SECONDARY:
+        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
+    case PSW_ASC_HOME:
+        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
+    default:
+        /* We don't support access register mode */
+        error_report("unsupported addressing mode");
+        exit(1);
+    }
+}
+
+/**
+ * translate_pages: Translate a set of consecutive logical page addresses
+ * to absolute addresses
+ */
+static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
+                           target_ulong *pages, bool is_write)
+{
+    bool lowprot = is_write && lowprot_enabled(&cpu->env);
+    uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
+    CPUS390XState *env = &cpu->env;
+    int ret, i, pflags;
+
+    for (i = 0; i < nr_pages; i++) {
+        /* Low-address protection? */
+        if (lowprot && (addr < 512 || (addr >= 4096 && addr < 4096 + 512))) {
+            trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, 0);
+            return -EACCES;
+        }
+        ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);
+        if (ret) {
+            return ret;
+        }
+        if (!address_space_access_valid(&address_space_memory, pages[i],
+                                        TARGET_PAGE_SIZE, is_write)) {
+            program_interrupt(env, PGM_ADDRESSING, 0);
+            return -EFAULT;
+        }
+        addr += TARGET_PAGE_SIZE;
+    }
+
+    return 0;
+}
+
+/**
+ * s390_cpu_virt_mem_rw:
+ * @laddr:     the logical start address
+ * @ar:        the access register number
+ * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
+ * @len:       length that should be transferred
+ * @is_write:  true = write, false = read
+ * Returns:    0 on success, non-zero if an exception occurred
+ *
+ * Copy from/to guest memory using logical addresses. Note that we inject a
+ * program interrupt if an error occurs while accessing the memory.
+ */
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+                         int len, bool is_write)
+{
+    int currlen, nr_pages, i;
+    target_ulong *pages;
+    int ret;
+
+    if (kvm_enabled()) {
+        ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
+        if (ret >= 0) {
+            return ret;
+        }
+    }
+
+    nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
+               + 1;
+    pages = g_malloc(nr_pages * sizeof(*pages));
+
+    ret = translate_pages(cpu, laddr, nr_pages, pages, is_write);
+    if (ret == 0 && hostbuf != NULL) {
+        /* Copy data by stepping through the area page by page */
+        for (i = 0; i < nr_pages; i++) {
+            currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
+            cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
+                                   hostbuf, currlen, is_write);
+            laddr += currlen;
+            hostbuf += currlen;
+            len -= currlen;
+        }
+    }
+
+    g_free(pages);
+    return ret;
+}
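+
+/*
+ * Usage sketch (hypothetical, for illustration): copy 16 bytes from a
+ * guest logical address into a host buffer; if this fails, a program
+ * interrupt has already been injected by the time it returns:
+ *
+ *     uint8_t buf[16];
+ *
+ *     if (s390_cpu_virt_mem_rw(cpu, laddr, 0, buf, sizeof(buf), false)) {
+ *         return;
+ *     }
+ */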
diff --git a/qemu/target-s390x/translate.c b/qemu/target-s390x/translate.c
new file mode 100644
index 000000000..c748290d5
--- /dev/null
+++ b/qemu/target-s390x/translate.c
@@ -0,0 +1,5473 @@
+/*
+ *  S/390 translation
+ *
+ *  Copyright (c) 2009 Ulrich Hecht
+ *  Copyright (c) 2010 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* #define DEBUG_INLINE_BRANCHES */
+#define S390X_DEBUG_DISAS
+/* #define S390X_DEBUG_DISAS_VERBOSE */
+
+#ifdef S390X_DEBUG_DISAS_VERBOSE
+#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
+#else
+#  define LOG_DISAS(...) do { } while (0)
+#endif
+
+#include "cpu.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "qemu/host-utils.h"
+#include "exec/cpu_ldst.h"
+
+/* global register indexes */
+static TCGv_ptr cpu_env;
+
+#include "exec/gen-icount.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+
+
+/* Information that (most) every instruction needs to manipulate.  */
+typedef struct DisasContext DisasContext;
+typedef struct DisasInsn DisasInsn;
+typedef struct DisasFields DisasFields;
+
+struct DisasContext {
+    struct TranslationBlock *tb;
+    const DisasInsn *insn;
+    DisasFields *fields;
+    uint64_t pc, next_pc;
+    enum cc_op cc_op;
+    bool singlestep_enabled;
+};
+
+/* Information carried about a condition to be evaluated.  */
+typedef struct {
+    TCGCond cond:8;
+    bool is_64;
+    bool g1;
+    bool g2;
+    union {
+        struct { TCGv_i64 a, b; } s64;
+        struct { TCGv_i32 a, b; } s32;
+    } u;
+} DisasCompare;
+
+#define DISAS_EXCP 4
+
+#ifdef DEBUG_INLINE_BRANCHES
+static uint64_t inline_branch_hit[CC_OP_MAX];
+static uint64_t inline_branch_miss[CC_OP_MAX];
+#endif
+
+static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
+{
+    if (!(s->tb->flags & FLAG_MASK_64)) {
+        if (s->tb->flags & FLAG_MASK_32) {
+            return pc | 0x80000000;
+        }
+    }
+    return pc;
+}
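+
+/*
+ * For example, in 31-bit mode (FLAG_MASK_32 set, FLAG_MASK_64 clear) a pc
+ * of 0x1000 yields link information 0x80001000: the addressing-mode bit
+ * is folded into the saved address, matching what BAS/BASR store.
+ */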
+
+void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+                         int flags)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    int i;
+
+    if (env->cc_op > 3) {
+        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
+                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
+    } else {
+        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
+                    env->psw.mask, env->psw.addr, env->cc_op);
+    }
+
+    for (i = 0; i < 16; i++) {
+        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
+        if ((i % 4) == 3) {
+            cpu_fprintf(f, "\n");
+        } else {
+            cpu_fprintf(f, " ");
+        }
+    }
+
+    for (i = 0; i < 16; i++) {
+        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
+        if ((i % 4) == 3) {
+            cpu_fprintf(f, "\n");
+        } else {
+            cpu_fprintf(f, " ");
+        }
+    }
+
+    for (i = 0; i < 32; i++) {
+        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
+                    env->vregs[i][0].ll, env->vregs[i][1].ll);
+        cpu_fprintf(f, (i % 2) ? " " : "\n");
+    }
+
+#ifndef CONFIG_USER_ONLY
+    for (i = 0; i < 16; i++) {
+        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
+        if ((i % 4) == 3) {
+            cpu_fprintf(f, "\n");
+        } else {
+            cpu_fprintf(f, " ");
+        }
+    }
+#endif
+
+#ifdef DEBUG_INLINE_BRANCHES
+    for (i = 0; i < CC_OP_MAX; i++) {
+        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
+                    inline_branch_miss[i], inline_branch_hit[i]);
+    }
+#endif
+
+    cpu_fprintf(f, "\n");
+}
+
+static TCGv_i64 psw_addr;
+static TCGv_i64 psw_mask;
+static TCGv_i64 gbea;
+
+static TCGv_i32 cc_op;
+static TCGv_i64 cc_src;
+static TCGv_i64 cc_dst;
+static TCGv_i64 cc_vr;
+
+static char cpu_reg_names[32][4];
+static TCGv_i64 regs[16];
+static TCGv_i64 fregs[16];
+
+static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
+
+void s390x_translate_init(void)
+{
+    int i;
+
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
+                                      offsetof(CPUS390XState, psw.addr),
+                                      "psw_addr");
+    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
+                                      offsetof(CPUS390XState, psw.mask),
+                                      "psw_mask");
+    gbea = tcg_global_mem_new_i64(TCG_AREG0,
+                                  offsetof(CPUS390XState, gbea),
+                                  "gbea");
+
+    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
+                                   "cc_op");
+    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
+                                    "cc_src");
+    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
+                                    "cc_dst");
+    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
+                                   "cc_vr");
+
+    for (i = 0; i < 16; i++) {
+        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
+        regs[i] = tcg_global_mem_new(TCG_AREG0,
+                                     offsetof(CPUS390XState, regs[i]),
+                                     cpu_reg_names[i]);
+    }
+
+    for (i = 0; i < 16; i++) {
+        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
+        fregs[i] = tcg_global_mem_new(TCG_AREG0,
+                                      offsetof(CPUS390XState, vregs[i][0].d),
+                                      cpu_reg_names[i + 16]);
+    }
+}
+
+static TCGv_i64 load_reg(int reg)
+{
+    TCGv_i64 r = tcg_temp_new_i64();
+    tcg_gen_mov_i64(r, regs[reg]);
+    return r;
+}
+
+static TCGv_i64 load_freg32_i64(int reg)
+{
+    TCGv_i64 r = tcg_temp_new_i64();
+    tcg_gen_shri_i64(r, fregs[reg], 32);
+    return r;
+}
+
+static void store_reg(int reg, TCGv_i64 v)
+{
+    tcg_gen_mov_i64(regs[reg], v);
+}
+
+static void store_freg(int reg, TCGv_i64 v)
+{
+    tcg_gen_mov_i64(fregs[reg], v);
+}
+
+static void store_reg32_i64(int reg, TCGv_i64 v)
+{
+    /* 32-bit register writes keep the upper half */
+    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
+}
+
+static void store_reg32h_i64(int reg, TCGv_i64 v)
+{
+    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
+}
+
+static void store_freg32_i64(int reg, TCGv_i64 v)
+{
+    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
+}
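+
+/*
+ * Example of the deposit semantics above: if regs[1] holds
+ * 0x1122334455667788 and v holds 0xdeadbeef, store_reg32_i64(1, v)
+ * leaves regs[1] at 0x11223344deadbeef, while store_reg32h_i64(1, v)
+ * would leave it at 0xdeadbeef55667788.
+ */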
+
+static void return_low128(TCGv_i64 dest)
+{
+    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
+}
+
+static void update_psw_addr(DisasContext *s)
+{
+    /* psw.addr */
+    tcg_gen_movi_i64(psw_addr, s->pc);
+}
+
+static void per_branch(DisasContext *s, bool to_next)
+{
+#ifndef CONFIG_USER_ONLY
+    tcg_gen_movi_i64(gbea, s->pc);
+
+    if (s->tb->flags & FLAG_MASK_PER) {
+        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
+        gen_helper_per_branch(cpu_env, gbea, next_pc);
+        if (to_next) {
+            tcg_temp_free_i64(next_pc);
+        }
+    }
+#endif
+}
+
+static void per_branch_cond(DisasContext *s, TCGCond cond,
+                            TCGv_i64 arg1, TCGv_i64 arg2)
+{
+#ifndef CONFIG_USER_ONLY
+    if (s->tb->flags & FLAG_MASK_PER) {
+        TCGLabel *lab = gen_new_label();
+        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
+
+        tcg_gen_movi_i64(gbea, s->pc);
+        gen_helper_per_branch(cpu_env, gbea, psw_addr);
+
+        gen_set_label(lab);
+    } else {
+        TCGv_i64 pc = tcg_const_i64(s->pc);
+        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
+        tcg_temp_free_i64(pc);
+    }
+#endif
+}
+
+static void per_breaking_event(DisasContext *s)
+{
+    tcg_gen_movi_i64(gbea, s->pc);
+}
+
+static void update_cc_op(DisasContext *s)
+{
+    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
+        tcg_gen_movi_i32(cc_op, s->cc_op);
+    }
+}
+
+static void potential_page_fault(DisasContext *s)
+{
+    update_psw_addr(s);
+    update_cc_op(s);
+}
+
+static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
+{
+    return (uint64_t)cpu_lduw_code(env, pc);
+}
+
+static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
+{
+    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
+}
+
+static int get_mem_index(DisasContext *s)
+{
+    switch (s->tb->flags & FLAG_MASK_ASC) {
+    case PSW_ASC_PRIMARY >> 32:
+        return 0;
+    case PSW_ASC_SECONDARY >> 32:
+        return 1;
+    case PSW_ASC_HOME >> 32:
+        return 2;
+    default:
+        tcg_abort();
+        break;
+    }
+}
+
+static void gen_exception(int excp)
+{
+    TCGv_i32 tmp = tcg_const_i32(excp);
+    gen_helper_exception(cpu_env, tmp);
+    tcg_temp_free_i32(tmp);
+}
+
+static void gen_program_exception(DisasContext *s, int code)
+{
+    TCGv_i32 tmp;
+
+    /* Remember what pgm exception this was.  */
+    tmp = tcg_const_i32(code);
+    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
+    tcg_temp_free_i32(tmp);
+
+    tmp = tcg_const_i32(s->next_pc - s->pc);
+    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
+    tcg_temp_free_i32(tmp);
+
+    /* Advance past instruction.  */
+    s->pc = s->next_pc;
+    update_psw_addr(s);
+
+    /* Save off cc.  */
+    update_cc_op(s);
+
+    /* Trigger exception.  */
+    gen_exception(EXCP_PGM);
+}
+
+static inline void gen_illegal_opcode(DisasContext *s)
+{
+    gen_program_exception(s, PGM_OPERATION);
+}
+
+static inline void gen_trap(DisasContext *s)
+{
+    TCGv_i32 t;
+
+    /* Set DXC to 0xff.  */
+    t = tcg_temp_new_i32();
+    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
+    tcg_gen_ori_i32(t, t, 0xff00);
+    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
+    tcg_temp_free_i32(t);
+
+    gen_program_exception(s, PGM_DATA);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void check_privileged(DisasContext *s)
+{
+    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
+        gen_program_exception(s, PGM_PRIVILEGED);
+    }
+}
+#endif
+
+static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
+{
+    TCGv_i64 tmp = tcg_temp_new_i64();
+    bool need_31 = !(s->tb->flags & FLAG_MASK_64);
+
+    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
+       displacements early we create larger immediate addends.  */
+
+    /* Note that addi optimizes the imm==0 case.  */
+    if (b2 && x2) {
+        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
+        tcg_gen_addi_i64(tmp, tmp, d2);
+    } else if (b2) {
+        tcg_gen_addi_i64(tmp, regs[b2], d2);
+    } else if (x2) {
+        tcg_gen_addi_i64(tmp, regs[x2], d2);
+    } else {
+        if (need_31) {
+            d2 &= 0x7fffffff;
+            need_31 = false;
+        }
+        tcg_gen_movi_i64(tmp, d2);
+    }
+    if (need_31) {
+        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
+    }
+
+    return tmp;
+}
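+
+/*
+ * Example of the 31-bit wraparound above: with FLAG_MASK_64 clear, a base
+ * register of 0x7fffff00 and a displacement of 0x200 sum to 0x80000100,
+ * which the final andi masks down to the architecturally correct 0x100.
+ */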
+
+static inline bool live_cc_data(DisasContext *s)
+{
+    return (s->cc_op != CC_OP_DYNAMIC
+            && s->cc_op != CC_OP_STATIC
+            && s->cc_op > 3);
+}
+
+static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
+{
+    if (live_cc_data(s)) {
+        tcg_gen_discard_i64(cc_src);
+        tcg_gen_discard_i64(cc_dst);
+        tcg_gen_discard_i64(cc_vr);
+    }
+    s->cc_op = CC_OP_CONST0 + val;
+}
+
+static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
+{
+    if (live_cc_data(s)) {
+        tcg_gen_discard_i64(cc_src);
+        tcg_gen_discard_i64(cc_vr);
+    }
+    tcg_gen_mov_i64(cc_dst, dst);
+    s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+                                  TCGv_i64 dst)
+{
+    if (live_cc_data(s)) {
+        tcg_gen_discard_i64(cc_vr);
+    }
+    tcg_gen_mov_i64(cc_src, src);
+    tcg_gen_mov_i64(cc_dst, dst);
+    s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+                                  TCGv_i64 dst, TCGv_i64 vr)
+{
+    tcg_gen_mov_i64(cc_src, src);
+    tcg_gen_mov_i64(cc_dst, dst);
+    tcg_gen_mov_i64(cc_vr, vr);
+    s->cc_op = op;
+}
+
+static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
+}
+
+static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
+}
+
+static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
+}
+
+static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
+{
+    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
+}
+
+/* CC value is in env->cc_op */
+static void set_cc_static(DisasContext *s)
+{
+    if (live_cc_data(s)) {
+        tcg_gen_discard_i64(cc_src);
+        tcg_gen_discard_i64(cc_dst);
+        tcg_gen_discard_i64(cc_vr);
+    }
+    s->cc_op = CC_OP_STATIC;
+}
+
+/* calculates cc into cc_op */
+static void gen_op_calc_cc(DisasContext *s)
+{
+    TCGv_i32 local_cc_op;
+    TCGv_i64 dummy;
+
+    TCGV_UNUSED_I32(local_cc_op);
+    TCGV_UNUSED_I64(dummy);
+    switch (s->cc_op) {
+    default:
+        dummy = tcg_const_i64(0);
+        /* FALLTHRU */
+    case CC_OP_ADD_64:
+    case CC_OP_ADDU_64:
+    case CC_OP_ADDC_64:
+    case CC_OP_SUB_64:
+    case CC_OP_SUBU_64:
+    case CC_OP_SUBB_64:
+    case CC_OP_ADD_32:
+    case CC_OP_ADDU_32:
+    case CC_OP_ADDC_32:
+    case CC_OP_SUB_32:
+    case CC_OP_SUBU_32:
+    case CC_OP_SUBB_32:
+        local_cc_op = tcg_const_i32(s->cc_op);
+        break;
+    case CC_OP_CONST0:
+    case CC_OP_CONST1:
+    case CC_OP_CONST2:
+    case CC_OP_CONST3:
+    case CC_OP_STATIC:
+    case CC_OP_DYNAMIC:
+        break;
+    }
+
+    switch (s->cc_op) {
+    case CC_OP_CONST0:
+    case CC_OP_CONST1:
+    case CC_OP_CONST2:
+    case CC_OP_CONST3:
+        /* s->cc_op is the cc value */
+        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
+        break;
+    case CC_OP_STATIC:
+        /* env->cc_op already is the cc value */
+        break;
+    case CC_OP_NZ:
+    case CC_OP_ABS_64:
+    case CC_OP_NABS_64:
+    case CC_OP_ABS_32:
+    case CC_OP_NABS_32:
+    case CC_OP_LTGT0_32:
+    case CC_OP_LTGT0_64:
+    case CC_OP_COMP_32:
+    case CC_OP_COMP_64:
+    case CC_OP_NZ_F32:
+    case CC_OP_NZ_F64:
+    case CC_OP_FLOGR:
+        /* 1 argument */
+        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
+        break;
+    case CC_OP_ICM:
+    case CC_OP_LTGT_32:
+    case CC_OP_LTGT_64:
+    case CC_OP_LTUGTU_32:
+    case CC_OP_LTUGTU_64:
+    case CC_OP_TM_32:
+    case CC_OP_TM_64:
+    case CC_OP_SLA_32:
+    case CC_OP_SLA_64:
+    case CC_OP_NZ_F128:
+        /* 2 arguments */
+        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
+        break;
+    case CC_OP_ADD_64:
+    case CC_OP_ADDU_64:
+    case CC_OP_ADDC_64:
+    case CC_OP_SUB_64:
+    case CC_OP_SUBU_64:
+    case CC_OP_SUBB_64:
+    case CC_OP_ADD_32:
+    case CC_OP_ADDU_32:
+    case CC_OP_ADDC_32:
+    case CC_OP_SUB_32:
+    case CC_OP_SUBU_32:
+    case CC_OP_SUBB_32:
+        /* 3 arguments */
+        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
+        break;
+    case CC_OP_DYNAMIC:
+        /* unknown operation - assume 3 arguments and cc_op in env */
+        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
+        break;
+    default:
+        tcg_abort();
+    }
+
+    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
+        tcg_temp_free_i32(local_cc_op);
+    }
+    if (!TCGV_IS_UNUSED_I64(dummy)) {
+        tcg_temp_free_i64(dummy);
+    }
+
+    /* We now have cc in cc_op as constant */
+    set_cc_static(s);
+}
+
+static int use_goto_tb(DisasContext *s, uint64_t dest)
+{
+    /* NOTE: we handle the case where the TB spans two pages here */
+    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
+             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
+            && !s->singlestep_enabled
+            && !(s->tb->cflags & CF_LAST_IO)
+            && !(s->tb->flags & FLAG_MASK_PER));
+}
+
+static void account_noninline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+    inline_branch_miss[cc_op]++;
+#endif
+}
+
+static void account_inline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+    inline_branch_hit[cc_op]++;
+#endif
+}
+
+/* Table of mask values to comparison codes, given a comparison as input.
+   For such operations, CC=3 should not be possible.  */
+static const TCGCond ltgt_cond[16] = {
+    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
+    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
+    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
+    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
+    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
+    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
+    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
+    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
+};
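+
+/*
+ * For example, a branch mask of 12 (bit 8 for CC 0 plus bit 4 for CC 1,
+ * i.e. "equal or low" after a signed compare) selects
+ * ltgt_cond[12] == TCG_COND_LE.
+ */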
+
+/* Table of mask values to comparison codes, given a logic op as input.
+   For such operations, only CC=0 and CC=1 should be possible.  */
+static const TCGCond nz_cond[16] = {
+    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
+    TCG_COND_NEVER, TCG_COND_NEVER,
+    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
+    TCG_COND_NE, TCG_COND_NE,
+    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
+    TCG_COND_EQ, TCG_COND_EQ,
+    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
+    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
+};
+
+/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
+   details required to generate a TCG comparison.  */
+static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
+{
+    TCGCond cond;
+    enum cc_op old_cc_op = s->cc_op;
+
+    if (mask == 15 || mask == 0) {
+        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
+        c->u.s32.a = cc_op;
+        c->u.s32.b = cc_op;
+        c->g1 = c->g2 = true;
+        c->is_64 = false;
+        return;
+    }
+
+    /* Find the TCG condition for the mask + cc op.  */
+    switch (old_cc_op) {
+    case CC_OP_LTGT0_32:
+    case CC_OP_LTGT0_64:
+    case CC_OP_LTGT_32:
+    case CC_OP_LTGT_64:
+        cond = ltgt_cond[mask];
+        if (cond == TCG_COND_NEVER) {
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    case CC_OP_LTUGTU_32:
+    case CC_OP_LTUGTU_64:
+        cond = tcg_unsigned_cond(ltgt_cond[mask]);
+        if (cond == TCG_COND_NEVER) {
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    case CC_OP_NZ:
+        cond = nz_cond[mask];
+        if (cond == TCG_COND_NEVER) {
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    case CC_OP_TM_32:
+    case CC_OP_TM_64:
+        switch (mask) {
+        case 8:
+            cond = TCG_COND_EQ;
+            break;
+        case 4 | 2 | 1:
+            cond = TCG_COND_NE;
+            break;
+        default:
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    case CC_OP_ICM:
+        switch (mask) {
+        case 8:
+            cond = TCG_COND_EQ;
+            break;
+        case 4 | 2 | 1:
+        case 4 | 2:
+            cond = TCG_COND_NE;
+            break;
+        default:
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    case CC_OP_FLOGR:
+        switch (mask & 0xa) {
+        case 8: /* src == 0 -> no one bit found */
+            cond = TCG_COND_EQ;
+            break;
+        case 2: /* src != 0 -> one bit found */
+            cond = TCG_COND_NE;
+            break;
+        default:
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    case CC_OP_ADDU_32:
+    case CC_OP_ADDU_64:
+        switch (mask) {
+        case 8 | 2: /* vr == 0 */
+            cond = TCG_COND_EQ;
+            break;
+        case 4 | 1: /* vr != 0 */
+            cond = TCG_COND_NE;
+            break;
+        case 8 | 4: /* no carry -> vr >= src */
+            cond = TCG_COND_GEU;
+            break;
+        case 2 | 1: /* carry -> vr < src */
+            cond = TCG_COND_LTU;
+            break;
+        default:
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    case CC_OP_SUBU_32:
+    case CC_OP_SUBU_64:
+        /* Note that CC=0 is impossible; treat it as don't-care.  */
+        switch (mask & 7) {
+        case 2: /* zero -> op1 == op2 */
+            cond = TCG_COND_EQ;
+            break;
+        case 4 | 1: /* !zero -> op1 != op2 */
+            cond = TCG_COND_NE;
+            break;
+        case 4: /* borrow (!carry) -> op1 < op2 */
+            cond = TCG_COND_LTU;
+            break;
+        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
+            cond = TCG_COND_GEU;
+            break;
+        default:
+            goto do_dynamic;
+        }
+        account_inline_branch(s, old_cc_op);
+        break;
+
+    default:
+    do_dynamic:
+        /* Calculate cc value.  */
+        gen_op_calc_cc(s);
+        /* FALLTHRU */
+
+    case CC_OP_STATIC:
+        /* Jump based on CC.  We'll load up the real cond below;
+           the assignment here merely avoids a compiler warning.  */
+        account_noninline_branch(s, old_cc_op);
+        old_cc_op = CC_OP_STATIC;
+        cond = TCG_COND_NEVER;
+        break;
+    }
+
+    /* Load up the arguments of the comparison.  */
+    c->is_64 = true;
+    c->g1 = c->g2 = false;
+    switch (old_cc_op) {
+    case CC_OP_LTGT0_32:
+        c->is_64 = false;
+        c->u.s32.a = tcg_temp_new_i32();
+        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
+        c->u.s32.b = tcg_const_i32(0);
+        break;
+    case CC_OP_LTGT_32:
+    case CC_OP_LTUGTU_32:
+    case CC_OP_SUBU_32:
+        c->is_64 = false;
+        c->u.s32.a = tcg_temp_new_i32();
+        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
+        c->u.s32.b = tcg_temp_new_i32();
+        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
+        break;
+
+    case CC_OP_LTGT0_64:
+    case CC_OP_NZ:
+    case CC_OP_FLOGR:
+        c->u.s64.a = cc_dst;
+        c->u.s64.b = tcg_const_i64(0);
+        c->g1 = true;
+        break;
+    case CC_OP_LTGT_64:
+    case CC_OP_LTUGTU_64:
+    case CC_OP_SUBU_64:
+        c->u.s64.a = cc_src;
+        c->u.s64.b = cc_dst;
+        c->g1 = c->g2 = true;
+        break;
+
+    case CC_OP_TM_32:
+    case CC_OP_TM_64:
+    case CC_OP_ICM:
+        c->u.s64.a = tcg_temp_new_i64();
+        c->u.s64.b = tcg_const_i64(0);
+        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
+        break;
+
+    case CC_OP_ADDU_32:
+        c->is_64 = false;
+        c->u.s32.a = tcg_temp_new_i32();
+        c->u.s32.b = tcg_temp_new_i32();
+        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
+        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
+            tcg_gen_movi_i32(c->u.s32.b, 0);
+        } else {
+            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
+        }
+        break;
+
+    case CC_OP_ADDU_64:
+        c->u.s64.a = cc_vr;
+        c->g1 = true;
+        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
+            c->u.s64.b = tcg_const_i64(0);
+        } else {
+            c->u.s64.b = cc_src;
+            c->g2 = true;
+        }
+        break;
+
+    case CC_OP_STATIC:
+        c->is_64 = false;
+        c->u.s32.a = cc_op;
+        c->g1 = true;
+        switch (mask) {
+        case 0x8 | 0x4 | 0x2: /* cc != 3 */
+            cond = TCG_COND_NE;
+            c->u.s32.b = tcg_const_i32(3);
+            break;
+        case 0x8 | 0x4 | 0x1: /* cc != 2 */
+            cond = TCG_COND_NE;
+            c->u.s32.b = tcg_const_i32(2);
+            break;
+        case 0x8 | 0x2 | 0x1: /* cc != 1 */
+            cond = TCG_COND_NE;
+            c->u.s32.b = tcg_const_i32(1);
+            break;
+        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
+            cond = TCG_COND_EQ;
+            c->g1 = false;
+            c->u.s32.a = tcg_temp_new_i32();
+            c->u.s32.b = tcg_const_i32(0);
+            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
+            break;
+        case 0x8 | 0x4: /* cc < 2 */
+            cond = TCG_COND_LTU;
+            c->u.s32.b = tcg_const_i32(2);
+            break;
+        case 0x8: /* cc == 0 */
+            cond = TCG_COND_EQ;
+            c->u.s32.b = tcg_const_i32(0);
+            break;
+        case 0x4 | 0x2 | 0x1: /* cc != 0 */
+            cond = TCG_COND_NE;
+            c->u.s32.b = tcg_const_i32(0);
+            break;
+        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
+            cond = TCG_COND_NE;
+            c->g1 = false;
+            c->u.s32.a = tcg_temp_new_i32();
+            c->u.s32.b = tcg_const_i32(0);
+            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
+            break;
+        case 0x4: /* cc == 1 */
+            cond = TCG_COND_EQ;
+            c->u.s32.b = tcg_const_i32(1);
+            break;
+        case 0x2 | 0x1: /* cc > 1 */
+            cond = TCG_COND_GTU;
+            c->u.s32.b = tcg_const_i32(1);
+            break;
+        case 0x2: /* cc == 2 */
+            cond = TCG_COND_EQ;
+            c->u.s32.b = tcg_const_i32(2);
+            break;
+        case 0x1: /* cc == 3 */
+            cond = TCG_COND_EQ;
+            c->u.s32.b = tcg_const_i32(3);
+            break;
+        default:
+            /* CC is masked by something else: (8 >> cc) & mask.  */
+            cond = TCG_COND_NE;
+            c->g1 = false;
+            c->u.s32.a = tcg_const_i32(8);
+            c->u.s32.b = tcg_const_i32(0);
+            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
+            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
+            break;
+        }
+        break;
+
+    default:
+        abort();
+    }
+    c->cond = cond;
+}
+
+static void free_compare(DisasCompare *c)
+{
+    if (!c->g1) {
+        if (c->is_64) {
+            tcg_temp_free_i64(c->u.s64.a);
+        } else {
+            tcg_temp_free_i32(c->u.s32.a);
+        }
+    }
+    if (!c->g2) {
+        if (c->is_64) {
+            tcg_temp_free_i64(c->u.s64.b);
+        } else {
+            tcg_temp_free_i32(c->u.s32.b);
+        }
+    }
+}
+
+/* ====================================================================== */
+/* Define the insn format enumeration.  */
+#define F0(N)                         FMT_##N,
+#define F1(N, X1)                     F0(N)
+#define F2(N, X1, X2)                 F0(N)
+#define F3(N, X1, X2, X3)             F0(N)
+#define F4(N, X1, X2, X3, X4)         F0(N)
+#define F5(N, X1, X2, X3, X4, X5)     F0(N)
+
+typedef enum {
+#include "insn-format.def"
+} DisasFormat;
+
+#undef F0
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef F5
+
+/* Define a structure to hold the decoded fields.  We'll store each inside
+   an array indexed by an enum.  In order to conserve memory, we'll arrange
+   for fields that do not exist at the same time to overlap, thus the "C"
+   for compact.  For checking purposes there is an "O" for original index
+   as well that will be applied to availability bitmaps.  */
+
+enum DisasFieldIndexO {
+    FLD_O_r1,
+    FLD_O_r2,
+    FLD_O_r3,
+    FLD_O_m1,
+    FLD_O_m3,
+    FLD_O_m4,
+    FLD_O_b1,
+    FLD_O_b2,
+    FLD_O_b4,
+    FLD_O_d1,
+    FLD_O_d2,
+    FLD_O_d4,
+    FLD_O_x2,
+    FLD_O_l1,
+    FLD_O_l2,
+    FLD_O_i1,
+    FLD_O_i2,
+    FLD_O_i3,
+    FLD_O_i4,
+    FLD_O_i5
+};
+
+enum DisasFieldIndexC {
+    FLD_C_r1 = 0,
+    FLD_C_m1 = 0,
+    FLD_C_b1 = 0,
+    FLD_C_i1 = 0,
+
+    FLD_C_r2 = 1,
+    FLD_C_b2 = 1,
+    FLD_C_i2 = 1,
+
+    FLD_C_r3 = 2,
+    FLD_C_m3 = 2,
+    FLD_C_i3 = 2,
+
+    FLD_C_m4 = 3,
+    FLD_C_b4 = 3,
+    FLD_C_i4 = 3,
+    FLD_C_l1 = 3,
+
+    FLD_C_i5 = 4,
+    FLD_C_d1 = 4,
+
+    FLD_C_d2 = 5,
+
+    FLD_C_d4 = 6,
+    FLD_C_x2 = 6,
+    FLD_C_l2 = 6,
+
+    NUM_C_FIELD = 7
+};
+
+struct DisasFields {
+    uint64_t raw_insn;
+    unsigned op:8;
+    unsigned op2:8;
+    unsigned presentC:16;
+    unsigned int presentO;
+    int c[NUM_C_FIELD];
+};
+
+/* This is the way fields are to be accessed out of DisasFields.  */
+#define have_field(S, F)  have_field1((S), FLD_O_##F)
+#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
+
+static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
+{
+    return (f->presentO >> c) & 1;
+}
+
+static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
+                      enum DisasFieldIndexC c)
+{
+    assert(have_field1(f, o));
+    return f->c[c];
+}
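+
+/*
+ * For example, get_field(S, b2) expands to
+ * get_field1(S, FLD_O_b2, FLD_C_b2), which asserts that the decoder
+ * filled in a b2 field for this format and then returns f->c[1]
+ * (FLD_C_b2 == 1).
+ */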
+
+/* Describe the layout of each field in each format.  */
+typedef struct DisasField {
+    unsigned int beg:8;
+    unsigned int size:8;
+    unsigned int type:2;
+    unsigned int indexC:6;
+    enum DisasFieldIndexO indexO:8;
+} DisasField;
+
+typedef struct DisasFormatInfo {
+    DisasField op[NUM_C_FIELD];
+} DisasFormatInfo;
+
+#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
+#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
+#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
+                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
+                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
+#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
+
+#define F0(N)                     { { } },
+#define F1(N, X1)                 { { X1 } },
+#define F2(N, X1, X2)             { { X1, X2 } },
+#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
+#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
+#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
+
+static const DisasFormatInfo format_info[] = {
+#include "insn-format.def"
+};
+
+#undef F0
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef F5
+#undef R
+#undef M
+#undef BD
+#undef BXD
+#undef BDL
+#undef BXDL
+#undef I
+#undef L
+
+/* Generally, we'll extract operands into this structure, operate upon
+   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
+   of routines below for more details.  */
+typedef struct {
+    bool g_out, g_out2, g_in1, g_in2;
+    TCGv_i64 out, out2, in1, in2;
+    TCGv_i64 addr1;
+} DisasOps;
+
+/* Instructions can place constraints on their operands, raising specification
+   exceptions if they are violated.  To make this easy to automate, each "in1",
+   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
+   of the following, or 0.  To make this easy to document, we'll put the
+   SPEC_<name> defines next to <name>.  */
+
+#define SPEC_r1_even    1
+#define SPEC_r2_even    2
+#define SPEC_r3_even    4
+#define SPEC_r1_f128    8
+#define SPEC_r2_f128    16
+
+/* Return values from translate_one, indicating the state of the TB.  */
+typedef enum {
+    /* Continue the TB.  */
+    NO_EXIT,
+    /* We have emitted one or more goto_tb.  No fixup required.  */
+    EXIT_GOTO_TB,
+    /* We are not using a goto_tb (for whatever reason), but have updated
+       the PC (for whatever reason), so there's no need to do it again on
+       exiting the TB.  */
+    EXIT_PC_UPDATED,
+    /* We are exiting the TB, but have neither emitted a goto_tb, nor
+       updated the PC for the next instruction to be executed.  */
+    EXIT_PC_STALE,
+    /* We are ending the TB with a noreturn function call, e.g. longjmp.
+       No following code will be executed.  */
+    EXIT_NORETURN,
+} ExitStatus;
+
+typedef enum DisasFacility {
+    FAC_Z,                  /* zarch (default) */
+    FAC_CASS,               /* compare and swap and store */
+    FAC_CASS2,              /* compare and swap and store 2 */
+    FAC_DFP,                /* decimal floating point */
+    FAC_DFPR,               /* decimal floating point rounding */
+    FAC_DO,                 /* distinct operands */
+    FAC_EE,                 /* execute extensions */
+    FAC_EI,                 /* extended immediate */
+    FAC_FPE,                /* floating point extension */
+    FAC_FPSSH,              /* floating point support sign handling */
+    FAC_FPRGR,              /* FPR-GR transfer */
+    FAC_GIE,                /* general instructions extension */
+    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
+    FAC_HW,                 /* high-word */
+    FAC_IEEEE_SIM,          /* IEEE exception simulation */
+    FAC_MIE,                /* miscellaneous-instruction-extensions */
+    FAC_LAT,                /* load-and-trap */
+    FAC_LOC,                /* load/store on condition */
+    FAC_LD,                 /* long displacement */
+    FAC_PC,                 /* population count */
+    FAC_SCF,                /* store clock fast */
+    FAC_SFLE,               /* store facility list extended */
+    FAC_ILA,                /* interlocked access facility 1 */
+} DisasFacility;
+
+struct DisasInsn {
+    unsigned opc:16;
+    DisasFormat fmt:8;
+    DisasFacility fac:8;
+    unsigned spec:8;
+
+    const char *name;
+
+    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
+    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
+    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
+    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
+    void (*help_cout)(DisasContext *, DisasOps *);
+    ExitStatus (*help_op)(DisasContext *, DisasOps *);
+
+    uint64_t data;
+};
+
+/* ====================================================================== */
+/* Miscellaneous helpers, used by several operations.  */
+
+static void help_l2_shift(DisasContext *s, DisasFields *f,
+                          DisasOps *o, int mask)
+{
+    int b2 = get_field(f, b2);
+    int d2 = get_field(f, d2);
+
+    if (b2 == 0) {
+        o->in2 = tcg_const_i64(d2 & mask);
+    } else {
+        o->in2 = get_address(s, 0, b2, d2);
+        tcg_gen_andi_i64(o->in2, o->in2, mask);
+    }
+}
+
+static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
+{
+    if (dest == s->next_pc) {
+        per_branch(s, true);
+        return NO_EXIT;
+    }
+    if (use_goto_tb(s, dest)) {
+        update_cc_op(s);
+        per_breaking_event(s);
+        tcg_gen_goto_tb(0);
+        tcg_gen_movi_i64(psw_addr, dest);
+        tcg_gen_exit_tb((uintptr_t)s->tb);
+        return EXIT_GOTO_TB;
+    } else {
+        tcg_gen_movi_i64(psw_addr, dest);
+        per_branch(s, false);
+        return EXIT_PC_UPDATED;
+    }
+}
+
+static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
+                              bool is_imm, int imm, TCGv_i64 cdest)
+{
+    ExitStatus ret;
+    uint64_t dest = s->pc + 2 * imm;
+    TCGLabel *lab;
+
+    /* Take care of the special cases first.  */
+    if (c->cond == TCG_COND_NEVER) {
+        ret = NO_EXIT;
+        goto egress;
+    }
+    if (is_imm) {
+        if (dest == s->next_pc) {
+            /* Branch to next.  */
+            per_branch(s, true);
+            ret = NO_EXIT;
+            goto egress;
+        }
+        if (c->cond == TCG_COND_ALWAYS) {
+            ret = help_goto_direct(s, dest);
+            goto egress;
+        }
+    } else {
+        if (TCGV_IS_UNUSED_I64(cdest)) {
+            /* E.g. bcr %r0 -> no branch.  */
+            ret = NO_EXIT;
+            goto egress;
+        }
+        if (c->cond == TCG_COND_ALWAYS) {
+            tcg_gen_mov_i64(psw_addr, cdest);
+            per_branch(s, false);
+            ret = EXIT_PC_UPDATED;
+            goto egress;
+        }
+    }
+
+    if (use_goto_tb(s, s->next_pc)) {
+        if (is_imm && use_goto_tb(s, dest)) {
+            /* Both exits can use goto_tb.  */
+            update_cc_op(s);
+
+            lab = gen_new_label();
+            if (c->is_64) {
+                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
+            } else {
+                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
+            }
+
+            /* Branch not taken.  */
+            tcg_gen_goto_tb(0);
+            tcg_gen_movi_i64(psw_addr, s->next_pc);
+            tcg_gen_exit_tb((uintptr_t)s->tb + 0);
+
+            /* Branch taken.  */
+            gen_set_label(lab);
+            per_breaking_event(s);
+            tcg_gen_goto_tb(1);
+            tcg_gen_movi_i64(psw_addr, dest);
+            tcg_gen_exit_tb((uintptr_t)s->tb + 1);
+
+            ret = EXIT_GOTO_TB;
+        } else {
+            /* Fallthru can use goto_tb, but taken branch cannot.  */
+            /* Store taken branch destination before the brcond.  This
+               avoids having to allocate a new local temp to hold it.
+               We'll overwrite this in the not taken case anyway.  */
+            if (!is_imm) {
+                tcg_gen_mov_i64(psw_addr, cdest);
+            }
+
+            lab = gen_new_label();
+            if (c->is_64) {
+                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
+            } else {
+                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
+            }
+
+            /* Branch not taken.  */
+            update_cc_op(s);
+            tcg_gen_goto_tb(0);
+            tcg_gen_movi_i64(psw_addr, s->next_pc);
+            tcg_gen_exit_tb((uintptr_t)s->tb + 0);
+
+            gen_set_label(lab);
+            if (is_imm) {
+                tcg_gen_movi_i64(psw_addr, dest);
+            }
+            per_breaking_event(s);
+            ret = EXIT_PC_UPDATED;
+        }
+    } else {
+        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
+           Most commonly we're single-stepping or some other condition that
+           disables all use of goto_tb.  Just update the PC and exit.  */
+
+        TCGv_i64 next = tcg_const_i64(s->next_pc);
+        if (is_imm) {
+            cdest = tcg_const_i64(dest);
+        }
+
+        if (c->is_64) {
+            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
+                                cdest, next);
+            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
+        } else {
+            TCGv_i32 t0 = tcg_temp_new_i32();
+            TCGv_i64 t1 = tcg_temp_new_i64();
+            TCGv_i64 z = tcg_const_i64(0);
+            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
+            tcg_gen_extu_i32_i64(t1, t0);
+            tcg_temp_free_i32(t0);
+            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
+            per_branch_cond(s, TCG_COND_NE, t1, z);
+            tcg_temp_free_i64(t1);
+            tcg_temp_free_i64(z);
+        }
+
+        if (is_imm) {
+            tcg_temp_free_i64(cdest);
+        }
+        tcg_temp_free_i64(next);
+
+        ret = EXIT_PC_UPDATED;
+    }
+
+ egress:
+    free_compare(c);
+    return ret;
+}
+
+/* ====================================================================== */
+/* The operations.  These perform the bulk of the work for any insn,
+   usually after the operands have been loaded and output initialized.  */
+
+static ExitStatus op_abs(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 z, n;
+    z = tcg_const_i64(0);
+    n = tcg_temp_new_i64();
+    tcg_gen_neg_i64(n, o->in2);
+    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
+    tcg_temp_free_i64(n);
+    tcg_temp_free_i64(z);
+    return NO_EXIT;
+}
+
+static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
+    return NO_EXIT;
+}
+
+static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
+    return NO_EXIT;
+}
+
+static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
+    tcg_gen_mov_i64(o->out2, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_add(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_add_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_addc(DisasContext *s, DisasOps *o)
+{
+    DisasCompare cmp;
+    TCGv_i64 carry;
+
+    tcg_gen_add_i64(o->out, o->in1, o->in2);
+
+    /* The carry flag is the msb of CC; the branch mask that tests for it
+       being set is 3.  Feeding the generated comparison to setcond
+       produces the carry flag that we desire.  */
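+    /* Concretely: CC values 2 and 3 are the ones whose msb is set, and
+       mask 3 = 0b0011 selects exactly those (mask bit 8 selects CC 0,
+       down to bit 1 selecting CC 3).  */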
+    disas_jcc(s, &cmp, 3);
+    carry = tcg_temp_new_i64();
+    if (cmp.is_64) {
+        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
+    } else {
+        TCGv_i32 t = tcg_temp_new_i32();
+        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+        tcg_gen_extu_i32_i64(carry, t);
+        tcg_temp_free_i32(t);
+    }
+    free_compare(&cmp);
+
+    tcg_gen_add_i64(o->out, o->out, carry);
+    tcg_temp_free_i64(carry);
+    return NO_EXIT;
+}
+
+static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_adb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_axb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_and(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_and_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_andi(DisasContext *s, DisasOps *o)
+{
+    int shift = s->insn->data & 0xff;
+    int size = s->insn->data >> 8;
+    uint64_t mask = ((1ull << size) - 1) << shift;
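+    /* For example (assuming the encoding set up in insn-data.def), NIHH
+       uses shift = 48 and size = 16, so MASK covers only the high-high
+       halfword being modified.  */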
+
+    assert(!o->g_in2);
+    tcg_gen_shli_i64(o->in2, o->in2, shift);
+    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
+    tcg_gen_and_i64(o->out, o->in1, o->in2);
+
+    /* Produce the CC from only the bits manipulated.  */
+    tcg_gen_andi_i64(cc_dst, o->out, mask);
+    set_cc_nz_u64(s, cc_dst);
+    return NO_EXIT;
+}
+
+static ExitStatus op_bas(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
+    if (!TCGV_IS_UNUSED_I64(o->in2)) {
+        tcg_gen_mov_i64(psw_addr, o->in2);
+        per_branch(s, false);
+        return EXIT_PC_UPDATED;
+    } else {
+        return NO_EXIT;
+    }
+}
+
+static ExitStatus op_basi(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
+    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
+}
+
+static ExitStatus op_bc(DisasContext *s, DisasOps *o)
+{
+    int m1 = get_field(s->fields, m1);
+    bool is_imm = have_field(s->fields, i2);
+    int imm = is_imm ? get_field(s->fields, i2) : 0;
+    DisasCompare c;
+
+    disas_jcc(s, &c, m1);
+    return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    bool is_imm = have_field(s->fields, i2);
+    int imm = is_imm ? get_field(s->fields, i2) : 0;
+    DisasCompare c;
+    TCGv_i64 t;
+
+    c.cond = TCG_COND_NE;
+    c.is_64 = false;
+    c.g1 = false;
+    c.g2 = false;
+
+    t = tcg_temp_new_i64();
+    tcg_gen_subi_i64(t, regs[r1], 1);
+    store_reg32_i64(r1, t);
+    c.u.s32.a = tcg_temp_new_i32();
+    c.u.s32.b = tcg_const_i32(0);
+    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+    tcg_temp_free_i64(t);
+
+    return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int imm = get_field(s->fields, i2);
+    DisasCompare c;
+    TCGv_i64 t;
+
+    c.cond = TCG_COND_NE;
+    c.is_64 = false;
+    c.g1 = false;
+    c.g2 = false;
+
+    t = tcg_temp_new_i64();
+    tcg_gen_shri_i64(t, regs[r1], 32);
+    tcg_gen_subi_i64(t, t, 1);
+    store_reg32h_i64(r1, t);
+    c.u.s32.a = tcg_temp_new_i32();
+    c.u.s32.b = tcg_const_i32(0);
+    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+    tcg_temp_free_i64(t);
+
+    return help_branch(s, &c, 1, imm, o->in2);
+}
+
+static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    bool is_imm = have_field(s->fields, i2);
+    int imm = is_imm ? get_field(s->fields, i2) : 0;
+    DisasCompare c;
+
+    c.cond = TCG_COND_NE;
+    c.is_64 = true;
+    c.g1 = true;
+    c.g2 = false;
+
+    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
+    c.u.s64.a = regs[r1];
+    c.u.s64.b = tcg_const_i64(0);
+
+    return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    bool is_imm = have_field(s->fields, i2);
+    int imm = is_imm ? get_field(s->fields, i2) : 0;
+    DisasCompare c;
+    TCGv_i64 t;
+
+    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
+    c.is_64 = false;
+    c.g1 = false;
+    c.g2 = false;
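+
+    /* Per the PoO, the comparand for BXH/BXLE is always the odd register
+       of the r3 pair, hence regs[r3 | 1] below; an odd r3 supplies both
+       increment and comparand from the same register.  */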
+
+    t = tcg_temp_new_i64();
+    tcg_gen_add_i64(t, regs[r1], regs[r3]);
+    c.u.s32.a = tcg_temp_new_i32();
+    c.u.s32.b = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
+    store_reg32_i64(r1, t);
+    tcg_temp_free_i64(t);
+
+    return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    bool is_imm = have_field(s->fields, i2);
+    int imm = is_imm ? get_field(s->fields, i2) : 0;
+    DisasCompare c;
+
+    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
+    c.is_64 = true;
+
+    if (r1 == (r3 | 1)) {
+        c.u.s64.b = load_reg(r3 | 1);
+        c.g2 = false;
+    } else {
+        c.u.s64.b = regs[r3 | 1];
+        c.g2 = true;
+    }
+
+    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
+    c.u.s64.a = regs[r1];
+    c.g1 = true;
+
+    return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_cj(DisasContext *s, DisasOps *o)
+{
+    int imm, m3 = get_field(s->fields, m3);
+    bool is_imm;
+    DisasCompare c;
+
+    c.cond = ltgt_cond[m3];
+    if (s->insn->data) {
+        c.cond = tcg_unsigned_cond(c.cond);
+    }
+    c.is_64 = c.g1 = c.g2 = true;
+    c.u.s64.a = o->in1;
+    c.u.s64.b = o->in2;
+
+    is_imm = have_field(s->fields, i4);
+    if (is_imm) {
+        imm = get_field(s->fields, i4);
+    } else {
+        imm = 0;
+        o->out = get_address(s, 0, get_field(s->fields, b4),
+                             get_field(s->fields, d4));
+    }
+
+    return help_branch(s, &c, is_imm, imm, o->out);
+}
+
+static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f32(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f64(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f128(s, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f32(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f64(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f128(s, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f32(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f64(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f128(s, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f32(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f64(s, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    gen_set_cc_nz_f128(s, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
+{
+    int r2 = get_field(s->fields, r2);
+    TCGv_i64 len = tcg_temp_new_i64();
+
+    potential_page_fault(s);
+    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
+    set_cc_static(s);
+    return_low128(o->out);
+
+    tcg_gen_add_i64(regs[r2], regs[r2], len);
+    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
+    tcg_temp_free_i64(len);
+
+    return NO_EXIT;
+}
+
+static ExitStatus op_clc(DisasContext *s, DisasOps *o)
+{
+    int l = get_field(s->fields, l1);
+    TCGv_i32 vl;
+
+    switch (l + 1) {
+    case 1:
+        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
+        break;
+    case 2:
+        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
+        break;
+    case 4:
+        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
+        break;
+    case 8:
+        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
+        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
+        break;
+    default:
+        potential_page_fault(s);
+        vl = tcg_const_i32(l);
+        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
+        tcg_temp_free_i32(vl);
+        set_cc_static(s);
+        return NO_EXIT;
+    }
+    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    potential_page_fault(s);
+    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clm(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    TCGv_i32 t1 = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(t1, o->in1);
+    potential_page_fault(s);
+    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
+    set_cc_static(s);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_clst(DisasContext *s, DisasOps *o)
+{
+    potential_page_fault(s);
+    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+    set_cc_static(s);
+    return_low128(o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cps(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
+    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
+    tcg_gen_or_i64(o->out, o->out, t);
+    tcg_temp_free_i64(t);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cs(DisasContext *s, DisasOps *o)
+{
+    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
+    int d2 = get_field(s->fields, d2);
+    int b2 = get_field(s->fields, b2);
+    int is_64 = s->insn->data;
+    TCGv_i64 addr, mem, cc, z;
+
+    /* Note that in1 = R3 (new value) and
+       in2 = (zero-extended) R1 (expected value).  */
+
+    /* Load the memory into the (temporary) output.  While the PoO only talks
+       about moving the memory to R1 on inequality, if we include equality it
+       means that R1 is equal to the memory in all conditions.  */
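+
+    /* A sketch of the semantics we generate:
+           tmp = mem[addr];
+           cc  = (tmp != r1);
+           mem[addr] = cc ? tmp : r3;   -- the store is always issued
+           r1  = tmp;
+    */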
+    addr = get_address(s, 0, b2, d2);
+    if (is_64) {
+        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
+    } else {
+        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
+    }
+
+    /* Are the memory and expected values (un)equal?  Note that this setcond
+       produces the output CC value, thus the NE sense of the test.  */
+    cc = tcg_temp_new_i64();
+    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
+
+    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
+       Recall that we are allowed to unconditionally issue the store (and
+       thus any possible write trap), so (re-)store the original contents
+       of MEM in case of inequality.  */
+    z = tcg_const_i64(0);
+    mem = tcg_temp_new_i64();
+    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
+    if (is_64) {
+        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
+    } else {
+        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
+    }
+    tcg_temp_free_i64(z);
+    tcg_temp_free_i64(mem);
+    tcg_temp_free_i64(addr);
+
+    /* Store CC back to cc_op.  Wait until after the store so that any
+       exception gets the old cc_op value.  */
+    tcg_gen_trunc_i64_i32(cc_op, cc);
+    tcg_temp_free_i64(cc);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
+{
+    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    int d2 = get_field(s->fields, d2);
+    int b2 = get_field(s->fields, b2);
+    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
+
+    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
+
+    addrh = get_address(s, 0, b2, d2);
+    addrl = get_address(s, 0, b2, d2 + 8);
+    outh = tcg_temp_new_i64();
+    outl = tcg_temp_new_i64();
+
+    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
+    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
+
+    /* Fold the double-word compare with arithmetic.  */
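+    /* (outh ^ r1) | (outl ^ r1+1) is nonzero iff the 128-bit values
+       differ, so a single setcond against zero yields the CC.  */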
+    cc = tcg_temp_new_i64();
+    z = tcg_temp_new_i64();
+    tcg_gen_xor_i64(cc, outh, regs[r1]);
+    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
+    tcg_gen_or_i64(cc, cc, z);
+    tcg_gen_movi_i64(z, 0);
+    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
+
+    memh = tcg_temp_new_i64();
+    meml = tcg_temp_new_i64();
+    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
+    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
+    tcg_temp_free_i64(z);
+
+    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
+    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
+    tcg_temp_free_i64(memh);
+    tcg_temp_free_i64(meml);
+    tcg_temp_free_i64(addrh);
+    tcg_temp_free_i64(addrl);
+
+    /* Save back state now that we've passed all exceptions.  */
+    tcg_gen_mov_i64(regs[r1], outh);
+    tcg_gen_mov_i64(regs[r1 + 1], outl);
+    tcg_gen_trunc_i64_i32(cc_op, cc);
+    tcg_temp_free_i64(outh);
+    tcg_temp_free_i64(outl);
+    tcg_temp_free_i64(cc);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_csp(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    check_privileged(s);
+    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
+    tcg_temp_free_i32(r1);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i32 t2 = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(t2, o->in1);
+    gen_helper_cvd(t1, t2);
+    tcg_temp_free_i32(t2);
+    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
+    tcg_temp_free_i64(t1);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ct(DisasContext *s, DisasOps *o)
+{
+    int m3 = get_field(s->fields, m3);
+    TCGLabel *lab = gen_new_label();
+    TCGCond c;
+
+    c = tcg_invert_cond(ltgt_cond[m3]);
+    if (s->insn->data) {
+        c = tcg_unsigned_cond(c);
+    }
+    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
+
+    /* Trap.  */
+    gen_trap(s);
+
+    gen_set_label(lab);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_diag(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
+
+    check_privileged(s);
+    update_psw_addr(s);
+    gen_op_calc_cc(s);
+
+    gen_helper_diag(cpu_env, r1, r3, func_code);
+
+    tcg_temp_free_i32(func_code);
+    tcg_temp_free_i32(r3);
+    tcg_temp_free_i32(r1);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
+{
+    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
+    return_low128(o->out);
+    return NO_EXIT;
+}
+
+static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
+{
+    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
+    return_low128(o->out);
+    return NO_EXIT;
+}
+
+static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
+{
+    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
+    return_low128(o->out);
+    return NO_EXIT;
+}
+
+static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
+{
+    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
+    return_low128(o->out);
+    return NO_EXIT;
+}
+
+static ExitStatus op_deb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ear(DisasContext *s, DisasOps *o)
+{
+    int r2 = get_field(s->fields, r2);
+    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
+    return NO_EXIT;
+}
+
+static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
+{
+    /* No cache information provided.  */
+    tcg_gen_movi_i64(o->out, -1);
+    return NO_EXIT;
+}
+
+static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
+    return NO_EXIT;
+}
+
+static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r2 = get_field(s->fields, r2);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    /* Note the "subsequently" in the PoO, which implies a defined result
+       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
+    tcg_gen_shri_i64(t, psw_mask, 32);
+    store_reg32_i64(r1, t);
+    if (r2 != 0) {
+        store_reg32_i64(r2, psw_mask);
+    }
+
+    tcg_temp_free_i64(t);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ex(DisasContext *s, DisasOps *o)
+{
+    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
+       tb->flags, (ab)use the tb->cs_base field as the address of
+       the template in memory, and grab 8 bits of tb->flags/cflags for
+       the contents of the register.  We would then recognize all this
+       in gen_intermediate_code_internal, generating code for exactly
+       one instruction.  This new TB then gets executed normally.
+
+       On the other hand, this seems to be mostly used for modifying
+       MVC inside of memcpy, which needs a helper call anyway.  So
+       perhaps this doesn't bear thinking about any further.  */
+
+    TCGv_i64 tmp;
+
+    update_psw_addr(s);
+    gen_op_calc_cc(s);
+
+    tmp = tcg_const_i64(s->next_pc);
+    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
+    tcg_temp_free_i64(tmp);
+
+    return NO_EXIT;
+}
+
+static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
+    return_low128(o->out2);
+    tcg_temp_free_i32(m3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
+{
+    /* We'll use the original input for cc computation, since we get to
+       compare that against 0, which ought to be better than comparing
+       the real output against 64.  It also lets cc_dst be a convenient
+       temporary during our computation.  */
+    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
+
+    /* R1 = IN ? CLZ(IN) : 64.  */
+    gen_helper_clz(o->out, o->in2);
+
+    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
+       value by 64, which is undefined.  But since the shift is 64 iff the
+       input is zero, we still get the correct result after and'ing.  */
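+    /* Example: IN = 1ull << 59 gives CLZ = 4; 0x8000... >> 4 isolates
+       the found bit, which the andc below then clears from the copy of
+       the input held in cc_dst.  */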
+    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
+    tcg_gen_shr_i64(o->out2, o->out2, o->out);
+    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_icm(DisasContext *s, DisasOps *o)
+{
+    int m3 = get_field(s->fields, m3);
+    int pos, len, base = s->insn->data;
+    TCGv_i64 tmp = tcg_temp_new_i64();
+    uint64_t ccm;
+
+    switch (m3) {
+    case 0xf:
+        /* Effectively a 32-bit load.  */
+        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
+        len = 32;
+        goto one_insert;
+
+    case 0xc:
+    case 0x6:
+    case 0x3:
+        /* Effectively a 16-bit load.  */
+        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
+        len = 16;
+        goto one_insert;
+
+    case 0x8:
+    case 0x4:
+    case 0x2:
+    case 0x1:
+        /* Effectively an 8-bit load.  */
+        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+        len = 8;
+        goto one_insert;
+
+    one_insert:
+        pos = base + ctz32(m3) * 8;
+        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
+        ccm = ((1ull << len) - 1) << pos;
+        break;
+
+    default:
+        /* This is going to be a sequence of loads and inserts.  */
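+        /* Example: m3 = 0b1010 loads two successive bytes and inserts
+           them at bit positions base+24 and base+8; mask bits that are
+           zero leave the corresponding register bytes untouched.  */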
+        pos = base + 32 - 8;
+        ccm = 0;
+        while (m3) {
+            if (m3 & 0x8) {
+                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+                tcg_gen_addi_i64(o->in2, o->in2, 1);
+                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
+                /* Use a 64-bit constant: for ICMH (base = 32), pos can
+                   reach 56, so a plain int 0xff would overflow.  */
+                ccm |= 0xffull << pos;
+            }
+            m3 = (m3 << 1) & 0xf;
+            pos -= 8;
+        }
+        break;
+    }
+
+    tcg_gen_movi_i64(tmp, ccm);
+    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
+    tcg_temp_free_i64(tmp);
+    return NO_EXIT;
+}
+
+static ExitStatus op_insi(DisasContext *s, DisasOps *o)
+{
+    int shift = s->insn->data & 0xff;
+    int size = s->insn->data >> 8;
+    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 t1;
+
+    gen_op_calc_cc(s);
+    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
+
+    t1 = tcg_temp_new_i64();
+    tcg_gen_shli_i64(t1, psw_mask, 20);
+    tcg_gen_shri_i64(t1, t1, 36);
+    tcg_gen_or_i64(o->out, o->out, t1);
+
+    tcg_gen_extu_i32_i64(t1, cc_op);
+    tcg_gen_shli_i64(t1, t1, 28);
+    tcg_gen_or_i64(o->out, o->out, t1);
+    tcg_temp_free_i64(t1);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_ipte(cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_iske(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_iske(o->out, cpu_env, o->in2);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_ldeb(o->out, cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_ledb(o->out, cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_lxdb(o->out, cpu_env, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_lxeb(o->out, cpu_env, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_lat(DisasContext *s, DisasOps *o)
+{
+    TCGLabel *lab = gen_new_label();
+    store_reg32_i64(get_field(s->fields, r1), o->in2);
+    /* The value is stored even in case of trap. */
+    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+    gen_trap(s);
+    gen_set_label(lab);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
+{
+    TCGLabel *lab = gen_new_label();
+    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+    /* The value is stored even in case of trap. */
+    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+    gen_trap(s);
+    gen_set_label(lab);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
+{
+    TCGLabel *lab = gen_new_label();
+    store_reg32h_i64(get_field(s->fields, r1), o->in2);
+    /* The value is stored even in case of trap. */
+    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+    gen_trap(s);
+    gen_set_label(lab);
+    return NO_EXIT;
+}
+
+static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
+{
+    TCGLabel *lab = gen_new_label();
+    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+    /* The value is stored even in case of trap. */
+    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+    gen_trap(s);
+    gen_set_label(lab);
+    return NO_EXIT;
+}
+
+static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
+{
+    TCGLabel *lab = gen_new_label();
+    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+    /* The value is stored even in case of trap. */
+    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+    gen_trap(s);
+    gen_set_label(lab);
+    return NO_EXIT;
+}
+
+static ExitStatus op_loc(DisasContext *s, DisasOps *o)
+{
+    DisasCompare c;
+
+    disas_jcc(s, &c, get_field(s->fields, m3));
+
+    if (c.is_64) {
+        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
+                            o->in2, o->in1);
+        free_compare(&c);
+    } else {
+        TCGv_i32 t32 = tcg_temp_new_i32();
+        TCGv_i64 t, z;
+
+        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
+        free_compare(&c);
+
+        t = tcg_temp_new_i64();
+        tcg_gen_extu_i32_i64(t, t32);
+        tcg_temp_free_i32(t32);
+
+        z = tcg_const_i64(0);
+        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
+        tcg_temp_free_i64(t);
+        tcg_temp_free_i64(z);
+    }
+
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_lctl(cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lra(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_lra(o->out, cpu_env, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 t1, t2;
+
+    check_privileged(s);
+    per_breaking_event(s);
+
+    t1 = tcg_temp_new_i64();
+    t2 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+    tcg_gen_addi_i64(o->in2, o->in2, 4);
+    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
+    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
+    tcg_gen_shli_i64(t1, t1, 32);
+    gen_helper_load_psw(cpu_env, t1, t2);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    return EXIT_NORETURN;
+}
+
+static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 t1, t2;
+
+    check_privileged(s);
+    per_breaking_event(s);
+
+    t1 = tcg_temp_new_i64();
+    t2 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+    tcg_gen_addi_i64(o->in2, o->in2, 8);
+    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
+    gen_helper_load_psw(cpu_env, t1, t2);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    return EXIT_NORETURN;
+}
+#endif
+
+static ExitStatus op_lam(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    potential_page_fault(s);
+    gen_helper_lam(cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    TCGv_i64 t1, t2;
+
+    /* Only one register to read. */
+    t1 = tcg_temp_new_i64();
+    if (unlikely(r1 == r3)) {
+        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        store_reg32_i64(r1, t1);
+        tcg_temp_free(t1);
+        return NO_EXIT;
+    }
+
+    /* First load the values of the first and last registers to trigger
+       possible page faults. */
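+    /* (r3 - r1) & 15 copes with register-number wraparound, e.g.
+       LM %r14,%r1 loads r14, r15, r0, r1 from successive words.  */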
+    t2 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+    store_reg32_i64(r1, t1);
+    store_reg32_i64(r3, t2);
+
+    /* Only two registers to read. */
+    if (((r1 + 1) & 15) == r3) {
+        tcg_temp_free(t2);
+        tcg_temp_free(t1);
+        return NO_EXIT;
+    }
+
+    /* Then load the remaining registers. Page fault can't occur. */
+    r3 = (r3 - 1) & 15;
+    tcg_gen_movi_i64(t2, 4);
+    while (r1 != r3) {
+        r1 = (r1 + 1) & 15;
+        tcg_gen_add_i64(o->in2, o->in2, t2);
+        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        store_reg32_i64(r1, t1);
+    }
+    tcg_temp_free(t2);
+    tcg_temp_free(t1);
+
+    return NO_EXIT;
+}
+
+static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    TCGv_i64 t1, t2;
+
+    /* Only one register to read. */
+    t1 = tcg_temp_new_i64();
+    if (unlikely(r1 == r3)) {
+        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        store_reg32h_i64(r1, t1);
+        tcg_temp_free(t1);
+        return NO_EXIT;
+    }
+
+    /* First load the values of the first and last registers to trigger
+       possible page faults. */
+    t2 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+    store_reg32h_i64(r1, t1);
+    store_reg32h_i64(r3, t2);
+
+    /* Only two registers to read. */
+    if (((r1 + 1) & 15) == r3) {
+        tcg_temp_free(t2);
+        tcg_temp_free(t1);
+        return NO_EXIT;
+    }
+
+    /* Then load the remaining registers. Page fault can't occur. */
+    r3 = (r3 - 1) & 15;
+    tcg_gen_movi_i64(t2, 4);
+    while (r1 != r3) {
+        r1 = (r1 + 1) & 15;
+        tcg_gen_add_i64(o->in2, o->in2, t2);
+        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        store_reg32h_i64(r1, t1);
+    }
+    tcg_temp_free(t2);
+    tcg_temp_free(t1);
+
+    return NO_EXIT;
+}
+
+static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    TCGv_i64 t1, t2;
+
+    /* Only one register to read. */
+    if (unlikely(r1 == r3)) {
+        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+        return NO_EXIT;
+    }
+
+    /* First load the values of the first and last registers to trigger
+       possible page faults. */
+    t1 = tcg_temp_new_i64();
+    t2 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
+    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
+    tcg_gen_mov_i64(regs[r1], t1);
+    tcg_temp_free(t2);
+
+    /* Only two registers to read. */
+    if (((r1 + 1) & 15) == r3) {
+        tcg_temp_free(t1);
+        return NO_EXIT;
+    }
+
+    /* Then load the remaining registers. Page fault can't occur. */
+    r3 = (r3 - 1) & 15;
+    tcg_gen_movi_i64(t1, 8);
+    while (r1 != r3) {
+        r1 = (r1 + 1) & 15;
+        tcg_gen_add_i64(o->in2, o->in2, t1);
+        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+    }
+    tcg_temp_free(t1);
+
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_lura(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_lura(o->out, cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_lurag(o->out, cpu_env, o->in2);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
+{
+    o->out = o->in2;
+    o->g_out = o->g_in2;
+    TCGV_UNUSED_I64(o->in2);
+    o->g_in2 = false;
+    return NO_EXIT;
+}
+
+static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
+{
+    int b2 = get_field(s->fields, b2);
+    TCGv_i64 ar1 = tcg_temp_new_i64();
+
+    o->out = o->in2;
+    o->g_out = o->g_in2;
+    TCGV_UNUSED_I64(o->in2);
+    o->g_in2 = false;
+
+    switch (s->tb->flags & FLAG_MASK_ASC) {
+    case PSW_ASC_PRIMARY >> 32:
+        tcg_gen_movi_i64(ar1, 0);
+        break;
+    case PSW_ASC_ACCREG >> 32:
+        tcg_gen_movi_i64(ar1, 1);
+        break;
+    case PSW_ASC_SECONDARY >> 32:
+        if (b2) {
+            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
+        } else {
+            tcg_gen_movi_i64(ar1, 0);
+        }
+        break;
+    case PSW_ASC_HOME >> 32:
+        tcg_gen_movi_i64(ar1, 2);
+        break;
+    }
+
+    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
+    tcg_temp_free_i64(ar1);
+
+    return NO_EXIT;
+}
+
+static ExitStatus op_movx(DisasContext *s, DisasOps *o)
+{
+    o->out = o->in1;
+    o->out2 = o->in2;
+    o->g_out = o->g_in1;
+    o->g_out2 = o->g_in2;
+    TCGV_UNUSED_I64(o->in1);
+    TCGV_UNUSED_I64(o->in2);
+    o->g_in1 = o->g_in2 = false;
+    return NO_EXIT;
+}
+
+static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+    potential_page_fault(s);
+    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
+    tcg_temp_free_i32(l);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
+    potential_page_fault(s);
+    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    potential_page_fault(s);
+    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, l1);
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, l1);
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
+{
+    potential_page_fault(s);
+    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
+{
+    potential_page_fault(s);
+    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+    set_cc_static(s);
+    return_low128(o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mul(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_mul_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
+    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
+    tcg_temp_free_i64(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_madb(DisasContext *s, DisasOps *o)
+{
+    int r3 = get_field(s->fields, r3);
+    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
+    return NO_EXIT;
+}
+
+static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
+    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
+    tcg_temp_free_i64(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
+{
+    int r3 = get_field(s->fields, r3);
+    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
+    return NO_EXIT;
+}
+
+static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 z, n;
+    z = tcg_const_i64(0);
+    n = tcg_temp_new_i64();
+    tcg_gen_neg_i64(n, o->in2);
+    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
+    tcg_temp_free_i64(n);
+    tcg_temp_free_i64(z);
+    return NO_EXIT;
+}
+
+static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
+    return NO_EXIT;
+}
+
+static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
+    return NO_EXIT;
+}
+
+static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
+    tcg_gen_mov_i64(o->out2, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_nc(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+    potential_page_fault(s);
+    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
+    tcg_temp_free_i32(l);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_neg(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_neg_i64(o->out, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
+    return NO_EXIT;
+}
+
+static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
+    return NO_EXIT;
+}
+
+static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
+    tcg_gen_mov_i64(o->out2, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_oc(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+    potential_page_fault(s);
+    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
+    tcg_temp_free_i32(l);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_or(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_or_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ori(DisasContext *s, DisasOps *o)
+{
+    int shift = s->insn->data & 0xff;
+    int size = s->insn->data >> 8;
+    uint64_t mask = ((1ull << size) - 1) << shift;
+
+    assert(!o->g_in2);
+    tcg_gen_shli_i64(o->in2, o->in2, shift);
+    tcg_gen_or_i64(o->out, o->in1, o->in2);
+
+    /* Produce the CC from only the bits manipulated.  */
+    tcg_gen_andi_i64(cc_dst, o->out, mask);
+    set_cc_nz_u64(s, cc_dst);
+    return NO_EXIT;
+}
+
+static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
+{
+    gen_helper_popcnt(o->out, o->in2);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_ptlb(cpu_env);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
+{
+    int i3 = get_field(s->fields, i3);
+    int i4 = get_field(s->fields, i4);
+    int i5 = get_field(s->fields, i5);
+    int do_zero = i4 & 0x80;
+    uint64_t mask, imask, pmask;
+    int pos, len, rot;
+
+    /* Adjust the arguments for the specific insn.  */
+    switch (s->fields->op2) {
+    case 0x55: /* risbg */
+        i3 &= 63;
+        i4 &= 63;
+        pmask = ~0;
+        break;
+    case 0x5d: /* risbhg */
+        i3 &= 31;
+        i4 &= 31;
+        pmask = 0xffffffff00000000ull;
+        break;
+    case 0x51: /* risblg */
+        i3 &= 31;
+        i4 &= 31;
+        pmask = 0x00000000ffffffffull;
+        break;
+    default:
+        abort();
+    }
+
+    /* MASK is the set of bits to be inserted from R2.
+       Take care for I3/I4 wraparound.  */
+    mask = pmask >> i3;
+    if (i3 <= i4) {
+        mask ^= pmask >> i4 >> 1;
+    } else {
+        mask |= ~(pmask >> i4 >> 1);
+    }
+    mask &= pmask;
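+    /* Worked example: plain RISBG with I3 = 40, I4 = 47 yields MASK
+       with exactly bits 40..47 set (PoO numbering, bit 0 = msb):
+       pmask >> 40 sets bits 40..63 and the xor with pmask >> 48
+       clears bits 48..63 again.  */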
+
+    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
+       insns, we need to keep the other half of the register.  */
+    imask = ~mask | ~pmask;
+    if (do_zero) {
+        if (s->fields->op2 == 0x55) {
+            imask = 0;
+        } else {
+            imask = ~pmask;
+        }
+    }
+
+    /* In some cases we can implement this with deposit, which can be more
+       efficient on some hosts.  */
+    if (~mask == imask && i3 <= i4) {
+        if (s->fields->op2 == 0x5d) {
+            i3 += 32, i4 += 32;
+        }
+        /* Note that we rotate the bits to be inserted to the lsb, not to
+           the position as described in the PoO.  */
+        len = i4 - i3 + 1;
+        pos = 63 - i4;
+        rot = (i5 - pos) & 63;
+    } else {
+        pos = len = -1;
+        rot = i5 & 63;
+    }
+
+    /* Rotate the input as necessary.  */
+    tcg_gen_rotli_i64(o->in2, o->in2, rot);
+
+    /* Insert the selected bits into the output.  */
+    if (pos >= 0) {
+        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
+    } else if (imask == 0) {
+        tcg_gen_andi_i64(o->out, o->in2, mask);
+    } else {
+        tcg_gen_andi_i64(o->in2, o->in2, mask);
+        tcg_gen_andi_i64(o->out, o->out, imask);
+        tcg_gen_or_i64(o->out, o->out, o->in2);
+    }
+    return NO_EXIT;
+}
+
+static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
+{
+    int i3 = get_field(s->fields, i3);
+    int i4 = get_field(s->fields, i4);
+    int i5 = get_field(s->fields, i5);
+    uint64_t mask;
+
+    /* If this is a test-only form, arrange to discard the result.  */
+    if (i3 & 0x80) {
+        o->out = tcg_temp_new_i64();
+        o->g_out = false;
+    }
+
+    i3 &= 63;
+    i4 &= 63;
+    i5 &= 63;
+
+    /* MASK is the set of bits to be operated on from R2.
+       Take care for I3/I4 wraparound.  */
+    mask = ~0ull >> i3;
+    if (i3 <= i4) {
+        mask ^= ~0ull >> i4 >> 1;
+    } else {
+        mask |= ~(~0ull >> i4 >> 1);
+    }
+
+    /* Rotate the input as necessary.  */
+    tcg_gen_rotli_i64(o->in2, o->in2, i5);
+
+    /* Operate.  */
+    switch (s->fields->op2) {
+    case 0x55: /* AND */
+        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
+        tcg_gen_and_i64(o->out, o->out, o->in2);
+        break;
+    case 0x56: /* OR */
+        tcg_gen_andi_i64(o->in2, o->in2, mask);
+        tcg_gen_or_i64(o->out, o->out, o->in2);
+        break;
+    case 0x57: /* XOR */
+        tcg_gen_andi_i64(o->in2, o->in2, mask);
+        tcg_gen_xor_i64(o->out, o->out, o->in2);
+        break;
+    default:
+        abort();
+    }
+
+    /* Set the CC.  */
+    tcg_gen_andi_i64(cc_dst, o->out, mask);
+    set_cc_nz_u64(s, cc_dst);
+    return NO_EXIT;
+}
+
+static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_bswap16_i64(o->out, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_bswap32_i64(o->out, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_bswap64_i64(o->out, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 t1 = tcg_temp_new_i32();
+    TCGv_i32 t2 = tcg_temp_new_i32();
+    TCGv_i32 to = tcg_temp_new_i32();
+    tcg_gen_trunc_i64_i32(t1, o->in1);
+    tcg_gen_trunc_i64_i32(t2, o->in2);
+    tcg_gen_rotl_i32(to, t1, t2);
+    tcg_gen_extu_i32_i64(o->out, to);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
+    tcg_temp_free_i32(to);
+    return NO_EXIT;
+}
+
+static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_rrbe(cc_op, cpu_env, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_sacf(cpu_env, o->in2);
+    /* Addressing mode has changed, so end the block.  */
+    return EXIT_PC_STALE;
+}
+#endif
+
+static ExitStatus op_sam(DisasContext *s, DisasOps *o)
+{
+    int sam = s->insn->data;
+    TCGv_i64 tsam;
+    uint64_t mask;
+
+    switch (sam) {
+    case 0:
+        mask = 0xffffff;
+        break;
+    case 1:
+        mask = 0x7fffffff;
+        break;
+    default:
+        mask = -1;
+        break;
+    }
+
+    /* Bizarre but true, we check the address of the current insn for the
+       specification exception, not the next to be executed.  Thus the PoO
+       documents that Bad Things Happen two bytes before the end.  */
+    if (s->pc & ~mask) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return EXIT_NORETURN;
+    }
+    s->next_pc &= mask;
+
+    tsam = tcg_const_i64(sam);
+    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
+    tcg_temp_free_i64(tsam);
+
+    /* Always exit the TB, since we (may have) changed execution mode.  */
+    return EXIT_PC_STALE;
+}
+
+static ExitStatus op_sar(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
+    return NO_EXIT;
+}
+
+static ExitStatus op_seb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_sqeb(o->out, cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_sqdb(o->out, cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
+    return_low128(o->out2);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_servc(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
+    tcg_temp_free_i32(r1);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_soc(DisasContext *s, DisasOps *o)
+{
+    DisasCompare c;
+    TCGv_i64 a;
+    TCGLabel *lab;
+    int r1;
+
+    disas_jcc(s, &c, get_field(s->fields, m3));
+
+    /* We want to store when the condition is fulfilled, so branch
+       out when it's not.  */
+    c.cond = tcg_invert_cond(c.cond);
+
+    lab = gen_new_label();
+    if (c.is_64) {
+        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
+    } else {
+        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
+    }
+    free_compare(&c);
+
+    r1 = get_field(s->fields, r1);
+    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
+    if (s->insn->data) {
+        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
+    } else {
+        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
+    }
+    tcg_temp_free_i64(a);
+
+    gen_set_label(lab);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sla(DisasContext *s, DisasOps *o)
+{
+    uint64_t sign = 1ull << s->insn->data;
+    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
+    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
+    tcg_gen_shl_i64(o->out, o->in1, o->in2);
+    /* The arithmetic left shift is curious in that it does not affect
+       the sign bit.  Copy that over from the source unchanged.  */
+    tcg_gen_andi_i64(o->out, o->out, ~sign);
+    tcg_gen_andi_i64(o->in1, o->in1, sign);
+    tcg_gen_or_i64(o->out, o->out, o->in1);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sll(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_shl_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sra(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_sar_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_srl(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_shr_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
+{
+    gen_helper_sfpc(cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
+{
+    gen_helper_sfas(cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
+{
+    int b2 = get_field(s->fields, b2);
+    int d2 = get_field(s->fields, d2);
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    int mask, pos, len;
+
+    switch (s->fields->op2) {
+    case 0x99: /* SRNM */
+        pos = 0, len = 2;
+        break;
+    case 0xb8: /* SRNMB */
+        pos = 0, len = 3;
+        break;
+    case 0xb9: /* SRNMT */
+        pos = 4, len = 3;
+        break;
+    default:
+        tcg_abort();
+    }
+    mask = (1 << len) - 1;
+
+    /* Insert the value into the appropriate field of the FPC.  */
+    if (b2 == 0) {
+        tcg_gen_movi_i64(t1, d2 & mask);
+    } else {
+        tcg_gen_addi_i64(t1, regs[b2], d2);
+        tcg_gen_andi_i64(t1, t1, mask);
+    }
+    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
+    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
+    tcg_temp_free_i64(t1);
+
+    /* Then install the new FPC to set the rounding mode in fpu_status.  */
+    gen_helper_sfpc(cpu_env, t2);
+    tcg_temp_free_i64(t2);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_spka(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    tcg_gen_shri_i64(o->in2, o->in2, 4);
+    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sske(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_sske(cpu_env, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stap(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    /* ??? Surely cpu address != cpu number.  In any case the previous
+       version of this stored more than the required half-word, so it
+       is unlikely this has ever been tested.  */
+    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
+    return NO_EXIT;
+}
+
+static ExitStatus op_stck(DisasContext *s, DisasOps *o)
+{
+    gen_helper_stck(o->out, cpu_env);
+    /* ??? We don't implement clock states.  */
+    gen_op_movi_cc(s, 0);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 c1 = tcg_temp_new_i64();
+    TCGv_i64 c2 = tcg_temp_new_i64();
+    gen_helper_stck(c1, cpu_env);
+    /* Shift the 64-bit value into its place as a zero-extended
+       104-bit value.  Note that "bit positions 64-103 are always
+       non-zero so that they compare differently to STCK"; we set
+       the least significant bit to 1.  */
+    tcg_gen_shli_i64(c2, c1, 56);
+    tcg_gen_shri_i64(c1, c1, 8);
+    tcg_gen_ori_i64(c2, c2, 0x10000);
+    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
+    tcg_gen_addi_i64(o->in2, o->in2, 8);
+    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
+    tcg_temp_free_i64(c1);
+    tcg_temp_free_i64(c2);
+    /* ??? We don't implement clock states.  */
+    gen_op_movi_cc(s, 0);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_sckc(cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_stckc(o->out, cpu_env);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_stctg(cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_stctl(cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+
+    check_privileged(s);
+    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
+    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
+    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
+    tcg_temp_free_i64(t1);
+
+    return NO_EXIT;
+}
+
+static ExitStatus op_spt(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_spt(cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
+{
+    TCGv_i64 f, a;
+    /* We really ought to have more complete indication of facilities
+       that we implement.  Address this when STFLE is implemented.  */
+    check_privileged(s);
+    f = tcg_const_i64(0xc0000000);
+    a = tcg_const_i64(200);
+    tcg_gen_qemu_st32(f, a, get_mem_index(s));
+    tcg_temp_free_i64(f);
+    tcg_temp_free_i64(a);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_stpt(o->out, cpu_env);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_spx(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    gen_helper_spx(cpu_env, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_xsch(cpu_env, regs[1]);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_csch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_csch(cpu_env, regs[1]);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_hsch(cpu_env, regs[1]);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_msch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_msch(cpu_env, regs[1], o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_rchp(cpu_env, regs[1]);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_rsch(cpu_env, regs[1]);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_ssch(cpu_env, regs[1], o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_stsch(cpu_env, regs[1], o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_tsch(cpu_env, regs[1], o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_chsc(cpu_env, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
+    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
+{
+    uint64_t i2 = get_field(s->fields, i2);
+    TCGv_i64 t;
+
+    check_privileged(s);
+
+    /* It is important to do what the instruction name says: STORE THEN.
+       If we let the output hook perform the store, then if we fault and
+       restart, we'll have the wrong SYSTEM MASK in place.  */
+    t = tcg_temp_new_i64();
+    tcg_gen_shri_i64(t, psw_mask, 56);
+    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
+    tcg_temp_free_i64(t);
+
+    if (s->fields->op == 0xac) {
+        tcg_gen_andi_i64(psw_mask, psw_mask,
+                         (i2 << 56) | 0x00ffffffffffffffull);
+    } else {
+        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
+    }
+    return NO_EXIT;
+}
+
+static ExitStatus op_stura(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_stura(cpu_env, o->in2, o->in1);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
+{
+    check_privileged(s);
+    potential_page_fault(s);
+    gen_helper_sturg(cpu_env, o->in2, o->in1);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_st8(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_st16(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_st32(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_st64(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
+    return NO_EXIT;
+}
+
+static ExitStatus op_stam(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+    potential_page_fault(s);
+    gen_helper_stam(cpu_env, r1, o->in2, r3);
+    tcg_temp_free_i32(r1);
+    tcg_temp_free_i32(r3);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
+{
+    int m3 = get_field(s->fields, m3);
+    int pos, base = s->insn->data;
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    pos = base + ctz32(m3) * 8;
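+    /* POS is the bit offset of the least significant selected byte within
+       the source register; for the contiguous masks handled below, a
+       single wide store suffices.  */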
+    switch (m3) {
+    case 0xf:
+        /* Effectively a 32-bit store.  */
+        tcg_gen_shri_i64(tmp, o->in1, pos);
+        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
+        break;
+
+    case 0xc:
+    case 0x6:
+    case 0x3:
+        /* Effectively a 16-bit store.  */
+        tcg_gen_shri_i64(tmp, o->in1, pos);
+        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
+        break;
+
+    case 0x8:
+    case 0x4:
+    case 0x2:
+    case 0x1:
+        /* Effectively an 8-bit store.  */
+        tcg_gen_shri_i64(tmp, o->in1, pos);
+        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+        break;
+
+    default:
+        /* This is going to be a sequence of shifts and stores.  */
+        pos = base + 32 - 8;
+        while (m3) {
+            if (m3 & 0x8) {
+                tcg_gen_shri_i64(tmp, o->in1, pos);
+                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+                tcg_gen_addi_i64(o->in2, o->in2, 1);
+            }
+            m3 = (m3 << 1) & 0xf;
+            pos -= 8;
+        }
+        break;
+    }
+    tcg_temp_free_i64(tmp);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stm(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    int size = s->insn->data;
+    TCGv_i64 tsize = tcg_const_i64(size);
+
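+    /* Store regs R1 through R3 inclusive, wrapping from 15 back to 0 and
+       advancing the address by the element size between stores.  */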
+    while (1) {
+        if (size == 8) {
+            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
+        } else {
+            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
+        }
+        if (r1 == r3) {
+            break;
+        }
+        tcg_gen_add_i64(o->in2, o->in2, tsize);
+        r1 = (r1 + 1) & 15;
+    }
+
+    tcg_temp_free_i64(tsize);
+    return NO_EXIT;
+}
+
+static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
+{
+    int r1 = get_field(s->fields, r1);
+    int r3 = get_field(s->fields, r3);
+    TCGv_i64 t = tcg_temp_new_i64();
+    TCGv_i64 t4 = tcg_const_i64(4);
+    TCGv_i64 t32 = tcg_const_i64(32);
+
+    while (1) {
+        tcg_gen_shl_i64(t, regs[r1], t32);
+        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
+        if (r1 == r3) {
+            break;
+        }
+        tcg_gen_add_i64(o->in2, o->in2, t4);
+        r1 = (r1 + 1) & 15;
+    }
+
+    tcg_temp_free_i64(t);
+    tcg_temp_free_i64(t4);
+    tcg_temp_free_i64(t32);
+    return NO_EXIT;
+}
+
+static ExitStatus op_srst(DisasContext *s, DisasOps *o)
+{
+    potential_page_fault(s);
+    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+    set_cc_static(s);
+    return_low128(o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_sub(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_sub_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_subb(DisasContext *s, DisasOps *o)
+{
+    DisasCompare cmp;
+    TCGv_i64 borrow;
+
+    tcg_gen_sub_i64(o->out, o->in1, o->in2);
+
+    /* The !borrow flag is the msb of CC.  Since we want the inverse of
+       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
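+    /* BORROW therefore computes to 1 exactly when CC is 0 or 1.  */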
+    disas_jcc(s, &cmp, 8 | 4);
+    borrow = tcg_temp_new_i64();
+    if (cmp.is_64) {
+        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
+    } else {
+        TCGv_i32 t = tcg_temp_new_i32();
+        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+        tcg_gen_extu_i32_i64(borrow, t);
+        tcg_temp_free_i32(t);
+    }
+    free_compare(&cmp);
+
+    tcg_gen_sub_i64(o->out, o->out, borrow);
+    tcg_temp_free_i64(borrow);
+    return NO_EXIT;
+}
+
+static ExitStatus op_svc(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 t;
+
+    update_psw_addr(s);
+    update_cc_op(s);
+
+    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
+    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
+    tcg_temp_free_i32(t);
+
+    t = tcg_const_i32(s->next_pc - s->pc);
+    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
+    tcg_temp_free_i32(t);
+
+    gen_exception(EXCP_SVC);
+    return EXIT_NORETURN;
+}
+
+static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_tceb(cc_op, o->in1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_tcdb(cc_op, o->in1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
+{
+    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
+{
+    potential_page_fault(s);
+    gen_helper_tprot(cc_op, o->addr1, o->in2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_tr(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+    potential_page_fault(s);
+    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
+    tcg_temp_free_i32(l);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_tre(DisasContext *s, DisasOps *o)
+{
+    potential_page_fault(s);
+    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
+    return_low128(o->out2);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_trt(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+    potential_page_fault(s);
+    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
+    tcg_temp_free_i32(l);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
+{
+    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+    potential_page_fault(s);
+    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
+    tcg_temp_free_i32(l);
+    return NO_EXIT;
+}
+
+static ExitStatus op_xc(DisasContext *s, DisasOps *o)
+{
+    int d1 = get_field(s->fields, d1);
+    int d2 = get_field(s->fields, d2);
+    int b1 = get_field(s->fields, b1);
+    int b2 = get_field(s->fields, b2);
+    int l = get_field(s->fields, l1);
+    TCGv_i32 t32;
+
+    o->addr1 = get_address(s, 0, b1, d1);
+
+    /* If the addresses are identical, this is a store/memset of zero.  */
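+    /* For instance, XC with both operands at the same address and a
+       length field of 7 (8 bytes) compiles to the single 64-bit store
+       emitted below.  */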
+    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
+        o->in2 = tcg_const_i64(0);
+
+        l++;
+        while (l >= 8) {
+            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
+            l -= 8;
+            if (l > 0) {
+                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
+            }
+        }
+        if (l >= 4) {
+            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
+            l -= 4;
+            if (l > 0) {
+                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
+            }
+        }
+        if (l >= 2) {
+            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
+            l -= 2;
+            if (l > 0) {
+                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
+            }
+        }
+        if (l) {
+            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
+        }
+        gen_op_movi_cc(s, 0);
+        return NO_EXIT;
+    }
+
+    /* But in general we'll defer to a helper.  */
+    o->in2 = get_address(s, 0, b2, d2);
+    t32 = tcg_const_i32(l);
+    potential_page_fault(s);
+    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
+    tcg_temp_free_i32(t32);
+    set_cc_static(s);
+    return NO_EXIT;
+}
+
+static ExitStatus op_xor(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_xor_i64(o->out, o->in1, o->in2);
+    return NO_EXIT;
+}
+
+static ExitStatus op_xori(DisasContext *s, DisasOps *o)
+{
+    int shift = s->insn->data & 0xff;
+    int size = s->insn->data >> 8;
+    uint64_t mask = ((1ull << size) - 1) << shift;
+
+    assert(!o->g_in2);
+    tcg_gen_shli_i64(o->in2, o->in2, shift);
+    tcg_gen_xor_i64(o->out, o->in1, o->in2);
+
+    /* Produce the CC from only the bits manipulated.  */
+    tcg_gen_andi_i64(cc_dst, o->out, mask);
+    set_cc_nz_u64(s, cc_dst);
+    return NO_EXIT;
+}
+
+static ExitStatus op_zero(DisasContext *s, DisasOps *o)
+{
+    o->out = tcg_const_i64(0);
+    return NO_EXIT;
+}
+
+static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
+{
+    o->out = tcg_const_i64(0);
+    o->out2 = o->out;
+    o->g_out2 = true;
+    return NO_EXIT;
+}
+
+/* ====================================================================== */
+/* The "Cc OUTput" generators.  Given the generated output (and in some cases
+   the original inputs), update the various cc data structures in order to
+   be able to compute the new condition code.  */
+
+static void cout_abs32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
+}
+
+static void cout_abs64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
+}
+
+static void cout_adds32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
+}
+
+static void cout_adds64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
+}
+
+static void cout_addu32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
+}
+
+static void cout_addu64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
+}
+
+static void cout_addc32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
+}
+
+static void cout_addc64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
+}
+
+static void cout_cmps32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
+}
+
+static void cout_cmps64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
+}
+
+static void cout_cmpu32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
+}
+
+static void cout_cmpu64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
+}
+
+static void cout_f32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
+}
+
+static void cout_f64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
+}
+
+static void cout_f128(DisasContext *s, DisasOps *o)
+{
+    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
+}
+
+static void cout_nabs32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
+}
+
+static void cout_nabs64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
+}
+
+static void cout_neg32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
+}
+
+static void cout_neg64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
+}
+
+static void cout_nz32(DisasContext *s, DisasOps *o)
+{
+    tcg_gen_ext32u_i64(cc_dst, o->out);
+    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
+}
+
+static void cout_nz64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
+}
+
+static void cout_s32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
+}
+
+static void cout_s64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
+}
+
+static void cout_subs32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
+}
+
+static void cout_subs64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
+}
+
+static void cout_subu32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
+}
+
+static void cout_subu64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
+}
+
+static void cout_subb32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
+}
+
+static void cout_subb64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
+}
+
+static void cout_tm32(DisasContext *s, DisasOps *o)
+{
+    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
+}
+
+static void cout_tm64(DisasContext *s, DisasOps *o)
+{
+    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
+}
+
+/* ====================================================================== */
+/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
+   with the TCG register to which we will write.  Used in combination with
+   the "wout" generators: in some cases we need a new temporary, and in
+   some cases we can write directly to a TCG global.  */
+
+static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->out = tcg_temp_new_i64();
+}
+#define SPEC_prep_new 0
+
+static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->out = tcg_temp_new_i64();
+    o->out2 = tcg_temp_new_i64();
+}
+#define SPEC_prep_new_P 0
+
+static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->out = regs[get_field(f, r1)];
+    o->g_out = true;
+}
+#define SPEC_prep_r1 0
+
+static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    o->out = regs[r1];
+    o->out2 = regs[r1 + 1];
+    o->g_out = o->g_out2 = true;
+}
+#define SPEC_prep_r1_P SPEC_r1_even
+
+static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->out = fregs[get_field(f, r1)];
+    o->g_out = true;
+}
+#define SPEC_prep_f1 0
+
+static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    o->out = fregs[r1];
+    o->out2 = fregs[r1 + 2];
+    o->g_out = o->g_out2 = true;
+}
+#define SPEC_prep_x1 SPEC_r1_f128
+
+/* ====================================================================== */
+/* The "Write OUTput" generators.  These generally perform some non-trivial
+   copy of data to TCG globals, or to main memory.  The trivial cases are
+   generally handled by having a "prep" generator install the TCG global
+   as the destination of the operation.  */
+
+static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    store_reg(get_field(f, r1), o->out);
+}
+#define SPEC_wout_r1 0
+
+static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
+}
+#define SPEC_wout_r1_8 0
+
+static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
+}
+#define SPEC_wout_r1_16 0
+
+static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    store_reg32_i64(get_field(f, r1), o->out);
+}
+#define SPEC_wout_r1_32 0
+
+static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    store_reg32h_i64(get_field(f, r1), o->out);
+}
+#define SPEC_wout_r1_32h 0
+
+static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    store_reg32_i64(r1, o->out);
+    store_reg32_i64(r1 + 1, o->out2);
+}
+#define SPEC_wout_r1_P32 SPEC_r1_even
+
+static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    store_reg32_i64(r1 + 1, o->out);
+    tcg_gen_shri_i64(o->out, o->out, 32);
+    store_reg32_i64(r1, o->out);
+}
+#define SPEC_wout_r1_D32 SPEC_r1_even
+
+static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    store_freg32_i64(get_field(f, r1), o->out);
+}
+#define SPEC_wout_e1 0
+
+static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    store_freg(get_field(f, r1), o->out);
+}
+#define SPEC_wout_f1 0
+
+static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int f1 = get_field(s->fields, r1);
+    store_freg(f1, o->out);
+    store_freg(f1 + 2, o->out2);
+}
+#define SPEC_wout_x1 SPEC_r1_f128
+
+static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    if (get_field(f, r1) != get_field(f, r2)) {
+        store_reg32_i64(get_field(f, r1), o->out);
+    }
+}
+#define SPEC_wout_cond_r1r2_32 0
+
+static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    if (get_field(f, r1) != get_field(f, r2)) {
+        store_freg32_i64(get_field(f, r1), o->out);
+    }
+}
+#define SPEC_wout_cond_e1e2 0
+
+static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_8 0
+
+static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_16 0
+
+static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_32 0
+
+static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_64 0
+
+static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
+}
+#define SPEC_wout_m2_32 0
+
+static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    /* XXX release reservation */
+    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
+    store_reg32_i64(get_field(f, r1), o->in2);
+}
+#define SPEC_wout_m2_32_r1_atomic 0
+
+static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    /* XXX release reservation */
+    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
+    store_reg(get_field(f, r1), o->in2);
+}
+#define SPEC_wout_m2_64_r1_atomic 0
+
+/* ====================================================================== */
+/* The "INput 1" generators.  These load the first operand to an insn.  */
+
+static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = load_reg(get_field(f, r1));
+}
+#define SPEC_in1_r1 0
+
+static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = regs[get_field(f, r1)];
+    o->g_in1 = true;
+}
+#define SPEC_in1_r1_o 0
+
+static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
+}
+#define SPEC_in1_r1_32s 0
+
+static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
+}
+#define SPEC_in1_r1_32u 0
+
+static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
+}
+#define SPEC_in1_r1_sr32 0
+
+static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = load_reg(get_field(f, r1) + 1);
+}
+#define SPEC_in1_r1p1 SPEC_r1_even
+
+static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
+}
+#define SPEC_in1_r1p1_32s SPEC_r1_even
+
+static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
+}
+#define SPEC_in1_r1p1_32u SPEC_r1_even
+
+static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in1_r1_D32 SPEC_r1_even
+
+static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = load_reg(get_field(f, r2));
+}
+#define SPEC_in1_r2 0
+
+static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
+}
+#define SPEC_in1_r2_sr32 0
+
+static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = load_reg(get_field(f, r3));
+}
+#define SPEC_in1_r3 0
+
+static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = regs[get_field(f, r3)];
+    o->g_in1 = true;
+}
+#define SPEC_in1_r3_o 0
+
+static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
+}
+#define SPEC_in1_r3_32s 0
+
+static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
+}
+#define SPEC_in1_r3_32u 0
+
+static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r3 = get_field(f, r3);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
+}
+#define SPEC_in1_r3_D32 SPEC_r3_even
+
+static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = load_freg32_i64(get_field(f, r1));
+}
+#define SPEC_in1_e1 0
+
+static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = fregs[get_field(f, r1)];
+    o->g_in1 = true;
+}
+#define SPEC_in1_f1_o 0
+
+static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    o->out = fregs[r1];
+    o->out2 = fregs[r1 + 2];
+    o->g_out = o->g_out2 = true;
+}
+#define SPEC_in1_x1_o SPEC_r1_f128
+
+static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in1 = fregs[get_field(f, r3)];
+    o->g_in1 = true;
+}
+#define SPEC_in1_f3_o 0
+
+static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
+}
+#define SPEC_in1_la1 0
+
+static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
+    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
+}
+#define SPEC_in1_la2 0
+
+static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in1_la1(s, f, o);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_8u 0
+
+static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in1_la1(s, f, o);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_16s 0
+
+static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in1_la1(s, f, o);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_16u 0
+
+static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in1_la1(s, f, o);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_32s 0
+
+static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in1_la1(s, f, o);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_32u 0
+
+static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in1_la1(s, f, o);
+    o->in1 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_64 0
+
+/* ====================================================================== */
+/* The "INput 2" generators.  These load the second operand to an insn.  */
+
+static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = regs[get_field(f, r1)];
+    o->g_in2 = true;
+}
+#define SPEC_in2_r1_o 0
+
+static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
+}
+#define SPEC_in2_r1_16u 0
+
+static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
+}
+#define SPEC_in2_r1_32u 0
+
+static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r1 = get_field(f, r1);
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in2_r1_D32 SPEC_r1_even
+
+static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = load_reg(get_field(f, r2));
+}
+#define SPEC_in2_r2 0
+
+static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = regs[get_field(f, r2)];
+    o->g_in2 = true;
+}
+#define SPEC_in2_r2_o 0
+
+static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r2 = get_field(f, r2);
+    if (r2 != 0) {
+        o->in2 = load_reg(r2);
+    }
+}
+#define SPEC_in2_r2_nz 0
+
+static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_8s 0
+
+static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_8u 0
+
+static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_16s 0
+
+static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_16u 0
+
+static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = load_reg(get_field(f, r3));
+}
+#define SPEC_in2_r3 0
+
+static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
+}
+#define SPEC_in2_r3_sr32 0
+
+static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_32s 0
+
+static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_32u 0
+
+static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
+}
+#define SPEC_in2_r2_sr32 0
+
+static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = load_freg32_i64(get_field(f, r2));
+}
+#define SPEC_in2_e2 0
+
+static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = fregs[get_field(f, r2)];
+    o->g_in2 = true;
+}
+#define SPEC_in2_f2_o 0
+
+static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int r2 = get_field(f, r2);
+    o->in1 = fregs[r2];
+    o->in2 = fregs[r2 + 2];
+    o->g_in1 = o->g_in2 = true;
+}
+#define SPEC_in2_x2_o SPEC_r2_f128
+
+static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = get_address(s, 0, get_field(f, r2), 0);
+}
+#define SPEC_in2_ra2 0
+
+static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
+    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
+}
+#define SPEC_in2_a2 0
+
+static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
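+    /* Relative-immediate operand: I2 is a signed halfword count from the
+       address of the current insn.  */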
+    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
+}
+#define SPEC_in2_ri2 0
+
+static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    help_l2_shift(s, f, o, 31);
+}
+#define SPEC_in2_sh32 0
+
+static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    help_l2_shift(s, f, o, 63);
+}
+#define SPEC_in2_sh64 0
+
+static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_a2(s, f, o);
+    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_8u 0
+
+static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_a2(s, f, o);
+    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_16s 0
+
+static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_a2(s, f, o);
+    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_16u 0
+
+static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_a2(s, f, o);
+    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_32s 0
+
+static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_a2(s, f, o);
+    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_32u 0
+
+static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_a2(s, f, o);
+    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_64 0
+
+static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_ri2(s, f, o);
+    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_16u 0
+
+static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_ri2(s, f, o);
+    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_32s 0
+
+static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_ri2(s, f, o);
+    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_32u 0
+
+static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    in2_ri2(s, f, o);
+    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_64 0
+
+static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    /* XXX should reserve the address */
+    in1_la2(s, f, o);
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
+}
+#define SPEC_in2_m2_32s_atomic 0
+
+static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    /* XXX should reserve the address */
+    in1_la2(s, f, o);
+    o->in2 = tcg_temp_new_i64();
+    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
+}
+#define SPEC_in2_m2_64_atomic 0
+
+static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_const_i64(get_field(f, i2));
+}
+#define SPEC_in2_i2 0
+
+static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
+}
+#define SPEC_in2_i2_8u 0
+
+static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
+}
+#define SPEC_in2_i2_16u 0
+
+static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
+}
+#define SPEC_in2_i2_32u 0
+
+static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    uint64_t i2 = (uint16_t)get_field(f, i2);
+    o->in2 = tcg_const_i64(i2 << s->insn->data);
+}
+#define SPEC_in2_i2_16u_shl 0
+
+static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    uint64_t i2 = (uint32_t)get_field(f, i2);
+    o->in2 = tcg_const_i64(i2 << s->insn->data);
+}
+#define SPEC_in2_i2_32u_shl 0
+
+#ifndef CONFIG_USER_ONLY
+static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+    o->in2 = tcg_const_i64(s->fields->raw_insn);
+}
+#define SPEC_in2_insn 0
+#endif
+
+/* ====================================================================== */
+
+/* Find opc within the table of insns.  This is formulated as a switch
+   statement so that (1) we get compile-time notice of cut-paste errors
+   for duplicated opcodes, and (2) the compiler generates the binary
+   search tree, rather than us having to post-process the table.  */
+
+#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
+    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
+
+#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
+
+enum DisasInsnEnum {
+#include "insn-data.def"
+};
+
+#undef D
+#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
+    .opc = OPC,                                                             \
+    .fmt = FMT_##FT,                                                        \
+    .fac = FAC_##FC,                                                        \
+    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
+    .name = #NM,                                                            \
+    .help_in1 = in1_##I1,                                                   \
+    .help_in2 = in2_##I2,                                                   \
+    .help_prep = prep_##P,                                                  \
+    .help_wout = wout_##W,                                                  \
+    .help_cout = cout_##CC,                                                 \
+    .help_op = op_##OP,                                                     \
+    .data = D                                                               \
+ },
+
+/* Allow 0 to be used for NULL in the table below.  */
+#define in1_0  NULL
+#define in2_0  NULL
+#define prep_0  NULL
+#define wout_0  NULL
+#define cout_0  NULL
+#define op_0  NULL
+
+#define SPEC_in1_0 0
+#define SPEC_in2_0 0
+#define SPEC_prep_0 0
+#define SPEC_wout_0 0
+
+static const DisasInsn insn_info[] = {
+#include "insn-data.def"
+};
+
+#undef D
+#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
+    case OPC: return &insn_info[insn_ ## NM];
+
+static const DisasInsn *lookup_opc(uint16_t opc)
+{
+    switch (opc) {
+#include "insn-data.def"
+    default:
+        return NULL;
+    }
+}
+
+#undef D
+#undef C
+
+/* Extract a field from the insn.  The INSN should be left-aligned in
+   the uint64_t so that we can more easily utilize the big-bit-endian
+   definitions we extract from the Principles of Operation.  */
+
+static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
+{
+    uint32_t r, m;
+
+    if (f->size == 0) {
+        return;
+    }
+
+    /* Zero-extract the field from the insn.  */
+    r = (insn << f->beg) >> (64 - f->size);
+
+    /* Sign-extend, or un-swap the field as necessary.  */
+    switch (f->type) {
+    case 0: /* unsigned */
+        break;
+    case 1: /* signed */
+        assert(f->size <= 32);
+        m = 1u << (f->size - 1);
+        r = (r ^ m) - m;
+        break;
+    case 2: /* dl+dh split, signed 20 bit. */
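+        /* The raw extract is (DL << 8) | DH; reassemble the signed
+           20-bit displacement as (DH << 12) | DL, sign-extending
+           through the int8_t cast.  */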
+        r = ((int8_t)r << 12) | (r >> 8);
+        break;
+    default:
+        abort();
+    }
+
+    /* Validate that the "compressed" encoding we selected above is valid.
+       I.e. we haven't made two different original fields overlap.  */
+    assert(((o->presentC >> f->indexC) & 1) == 0);
+    o->presentC |= 1 << f->indexC;
+    o->presentO |= 1 << f->indexO;
+
+    o->c[f->indexC] = r;
+}
+
+/* Look up the insn at the current PC, extracting the operands into F and
+   returning the info struct for the insn.  Returns NULL for an invalid insn.  */
+
+static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
+                                     DisasFields *f)
+{
+    uint64_t insn, pc = s->pc;
+    int op, op2, ilen;
+    const DisasInsn *info;
+
+    insn = ld_code2(env, pc);
+    op = (insn >> 8) & 0xff;
+    ilen = get_ilen(op);
+    s->next_pc = s->pc + ilen;
+
+    switch (ilen) {
+    case 2:
+        insn = insn << 48;
+        break;
+    case 4:
+        insn = ld_code4(env, pc) << 32;
+        break;
+    case 6:
+        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
+        break;
+    default:
+        abort();
+    }
+
+    /* We can't actually determine the insn format until we've looked up
+       the full insn opcode, which we can't do without locating the
+       secondary opcode.  Assume by default that OP2 is at bit 40; for
+       those smaller insns that don't actually have a secondary opcode
+       this will correctly result in OP2 = 0. */
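+    /* For example, for a 0xb2xx insn the second opcode byte occupies bits
+       8-15 of the left-aligned insn, extracted as (insn << 8) >> 56.  */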
+    switch (op) {
+    case 0x01: /* E */
+    case 0x80: /* S */
+    case 0x82: /* S */
+    case 0x93: /* S */
+    case 0xb2: /* S, RRF, RRE */
+    case 0xb3: /* RRE, RRD, RRF */
+    case 0xb9: /* RRE, RRF */
+    case 0xe5: /* SSE, SIL */
+        op2 = (insn << 8) >> 56;
+        break;
+    case 0xa5: /* RI */
+    case 0xa7: /* RI */
+    case 0xc0: /* RIL */
+    case 0xc2: /* RIL */
+    case 0xc4: /* RIL */
+    case 0xc6: /* RIL */
+    case 0xc8: /* SSF */
+    case 0xcc: /* RIL */
+        op2 = (insn << 12) >> 60;
+        break;
+    case 0xd0 ... 0xdf: /* SS */
+    case 0xe1: /* SS */
+    case 0xe2: /* SS */
+    case 0xe8: /* SS */
+    case 0xe9: /* SS */
+    case 0xea: /* SS */
+    case 0xee ... 0xf3: /* SS */
+    case 0xf8 ... 0xfd: /* SS */
+        op2 = 0;
+        break;
+    default:
+        op2 = (insn << 40) >> 56;
+        break;
+    }
+
+    memset(f, 0, sizeof(*f));
+    f->raw_insn = insn;
+    f->op = op;
+    f->op2 = op2;
+
+    /* Lookup the instruction.  */
+    info = lookup_opc(op << 8 | op2);
+
+    /* If we found it, extract the operands.  */
+    if (info != NULL) {
+        DisasFormat fmt = info->fmt;
+        int i;
+
+        for (i = 0; i < NUM_C_FIELD; ++i) {
+            extract_field(f, &format_info[fmt].op[i], insn);
+        }
+    }
+    return info;
+}
+
+static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
+{
+    const DisasInsn *insn;
+    ExitStatus ret = NO_EXIT;
+    DisasFields f;
+    DisasOps o;
+
+    /* Search for the insn in the table.  */
+    insn = extract_insn(env, s, &f);
+
+    /* Not found means unimplemented/illegal opcode.  */
+    if (insn == NULL) {
+        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
+                      f.op, f.op2);
+        gen_illegal_opcode(s);
+        return EXIT_NORETURN;
+    }
+
+#ifndef CONFIG_USER_ONLY
+    if (s->tb->flags & FLAG_MASK_PER) {
+        TCGv_i64 addr = tcg_const_i64(s->pc);
+        gen_helper_per_ifetch(cpu_env, addr);
+        tcg_temp_free_i64(addr);
+    }
+#endif
+
+    /* Check for insn specification exceptions.  */
+    if (insn->spec) {
+        int spec = insn->spec, excp = 0, r;
+
+        if (spec & SPEC_r1_even) {
+            r = get_field(&f, r1);
+            if (r & 1) {
+                excp = PGM_SPECIFICATION;
+            }
+        }
+        if (spec & SPEC_r2_even) {
+            r = get_field(&f, r2);
+            if (r & 1) {
+                excp = PGM_SPECIFICATION;
+            }
+        }
+        if (spec & SPEC_r3_even) {
+            r = get_field(&f, r3);
+            if (r & 1) {
+                excp = PGM_SPECIFICATION;
+            }
+        }
+        if (spec & SPEC_r1_f128) {
+            r = get_field(&f, r1);
+            if (r > 13) {
+                excp = PGM_SPECIFICATION;
+            }
+        }
+        if (spec & SPEC_r2_f128) {
+            r = get_field(&f, r2);
+            if (r > 13) {
+                excp = PGM_SPECIFICATION;
+            }
+        }
+        if (excp) {
+            gen_program_exception(s, excp);
+            return EXIT_NORETURN;
+        }
+    }
+
+    /* Set up the structures we use to communicate with the helpers.  */
+    s->insn = insn;
+    s->fields = &f;
+    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
+    TCGV_UNUSED_I64(o.out);
+    TCGV_UNUSED_I64(o.out2);
+    TCGV_UNUSED_I64(o.in1);
+    TCGV_UNUSED_I64(o.in2);
+    TCGV_UNUSED_I64(o.addr1);
+
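+    /* Each insn provides up to six optional hooks, applied in a fixed
+       order: load the inputs, prepare the output, perform the operation,
+       write the result back, and compute the condition code.  Only
+       help_op can request a TB exit.  */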
+    /* Implement the instruction.  */
+    if (insn->help_in1) {
+        insn->help_in1(s, &f, &o);
+    }
+    if (insn->help_in2) {
+        insn->help_in2(s, &f, &o);
+    }
+    if (insn->help_prep) {
+        insn->help_prep(s, &f, &o);
+    }
+    if (insn->help_op) {
+        ret = insn->help_op(s, &o);
+    }
+    if (insn->help_wout) {
+        insn->help_wout(s, &f, &o);
+    }
+    if (insn->help_cout) {
+        insn->help_cout(s, &o);
+    }
+
+    /* Free any temporaries created by the helpers.  */
+    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
+        tcg_temp_free_i64(o.out);
+    }
+    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
+        tcg_temp_free_i64(o.out2);
+    }
+    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
+        tcg_temp_free_i64(o.in1);
+    }
+    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
+        tcg_temp_free_i64(o.in2);
+    }
+    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
+        tcg_temp_free_i64(o.addr1);
+    }
+
+#ifndef CONFIG_USER_ONLY
+    if (s->tb->flags & FLAG_MASK_PER) {
+        /* An exception might be triggered; save the PSW if not already
+           done.  */
+        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
+            tcg_gen_movi_i64(psw_addr, s->next_pc);
+        }
+
+        /* Save off cc.  */
+        update_cc_op(s);
+
+        /* Call the helper to check for a possible PER exception.  */
+        gen_helper_per_check_exception(cpu_env);
+    }
+#endif
+
+    /* Advance to the next instruction.  */
+    s->pc = s->next_pc;
+    return ret;
+}
+
+static inline void gen_intermediate_code_internal(S390CPU *cpu,
+                                                  TranslationBlock *tb,
+                                                  bool search_pc)
+{
+    CPUState *cs = CPU(cpu);
+    CPUS390XState *env = &cpu->env;
+    DisasContext dc;
+    target_ulong pc_start;
+    uint64_t next_page_start;
+    int j, lj = -1;
+    int num_insns, max_insns;
+    CPUBreakpoint *bp;
+    ExitStatus status;
+    bool do_debug;
+
+    pc_start = tb->pc;
+
+    /* 31-bit mode */
+    if (!(tb->flags & FLAG_MASK_64)) {
+        pc_start &= 0x7fffffff;
+    }
+
+    dc.tb = tb;
+    dc.pc = pc_start;
+    dc.cc_op = CC_OP_DYNAMIC;
+    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
+
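+    /* Translation must stop at a guest page boundary; remember where the
+       current page ends.  */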
+    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+
+    num_insns = 0;
+    max_insns = tb->cflags & CF_COUNT_MASK;
+    if (max_insns == 0) {
+        max_insns = CF_COUNT_MASK;
+    }
+
+    gen_tb_start(tb);
+
+    do {
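+        /* In search_pc mode, record the guest PC, cc state, and insn
+           count for each op so that a host PC can later be mapped back
+           to guest state (see restore_state_to_opc).  */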
+        if (search_pc) {
+            j = tcg_op_buf_count();
+            if (lj < j) {
+                lj++;
+                while (lj < j) {
+                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
+                }
+            }
+            tcg_ctx.gen_opc_pc[lj] = dc.pc;
+            gen_opc_cc_op[lj] = dc.cc_op;
+            tcg_ctx.gen_opc_instr_start[lj] = 1;
+            tcg_ctx.gen_opc_icount[lj] = num_insns;
+        }
+        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+            gen_io_start();
+        }
+
+        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
+            tcg_gen_debug_insn_start(dc.pc);
+        }
+
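+        /* Stop translation at a guest breakpoint; do_debug makes the TB
+           exit below with a debug exception instead of a plain return.  */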
+        status = NO_EXIT;
+        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+                if (bp->pc == dc.pc) {
+                    status = EXIT_PC_STALE;
+                    do_debug = true;
+                    break;
+                }
+            }
+        }
+        if (status == NO_EXIT) {
+            status = translate_one(env, &dc);
+        }
+
+        /* If we reach a page boundary, are single stepping,
+           or exhaust instruction count, stop generation.  */
+        if (status == NO_EXIT
+            && (dc.pc >= next_page_start
+                || tcg_op_buf_full()
+                || num_insns >= max_insns
+                || singlestep
+                || cs->singlestep_enabled)) {
+            status = EXIT_PC_STALE;
+        }
+    } while (status == NO_EXIT);
+
+    if (tb->cflags & CF_LAST_IO) {
+        gen_io_end();
+    }
+
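+    /* Emit the TB epilogue appropriate to how translation stopped.  */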
+    switch (status) {
+    case EXIT_GOTO_TB:
+    case EXIT_NORETURN:
+        break;
+    case EXIT_PC_STALE:
+        update_psw_addr(&dc);
+        /* FALLTHRU */
+    case EXIT_PC_UPDATED:
+        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
+           cc op type is in env */
+        update_cc_op(&dc);
+        /* Exit the TB, either by raising a debug exception or by return.  */
+        if (do_debug) {
+            gen_exception(EXCP_DEBUG);
+        } else {
+            tcg_gen_exit_tb(0);
+        }
+        break;
+    default:
+        abort();
+    }
+
+    gen_tb_end(tb, num_insns);
+
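+    /* In search_pc mode, zero-fill the instr_start slots for any ops
+       emitted after the last insn; otherwise record the TB's final
+       size and instruction count.  */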
+    if (search_pc) {
+        j = tcg_op_buf_count();
+        lj++;
+        while (lj <= j) {
+            tcg_ctx.gen_opc_instr_start[lj++] = 0;
+        }
+    } else {
+        tb->size = dc.pc - pc_start;
+        tb->icount = num_insns;
+    }
+
+#if defined(S390X_DEBUG_DISAS)
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
+        log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
+        qemu_log("\n");
+    }
+#endif
+}
+
+void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
+{
+    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
+}
+
+void gen_intermediate_code_pc(CPUS390XState *env, struct TranslationBlock *tb)
+{
+    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
+}
+
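+/* Restore guest state from the gen_opc arrays after a fault; PC_POS is
+   the op index located for the faulting host PC.  Only a concrete cc op
+   is written back: for the dynamic and static cases env->cc_op is
+   already up to date.  */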
+void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
+{
+    int cc_op;
+    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
+    cc_op = gen_opc_cc_op[pc_pos];
+    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
+        env->cc_op = cc_op;
+    }
+}