Diffstat (limited to 'kernel/arch/s390/include/asm')
-rw-r--r-- kernel/arch/s390/include/asm/Kbuild | 8
-rw-r--r-- kernel/arch/s390/include/asm/airq.h | 103
-rw-r--r-- kernel/arch/s390/include/asm/appldata.h | 64
-rw-r--r-- kernel/arch/s390/include/asm/asm-offsets.h | 1
-rw-r--r-- kernel/arch/s390/include/asm/atomic.h | 320
-rw-r--r-- kernel/arch/s390/include/asm/barrier.h | 56
-rw-r--r-- kernel/arch/s390/include/asm/bitops.h | 454
-rw-r--r-- kernel/arch/s390/include/asm/bug.h | 71
-rw-r--r-- kernel/arch/s390/include/asm/bugs.h | 20
-rw-r--r-- kernel/arch/s390/include/asm/cache.h | 18
-rw-r--r-- kernel/arch/s390/include/asm/cacheflush.h | 12
-rw-r--r-- kernel/arch/s390/include/asm/ccwdev.h | 233
-rw-r--r-- kernel/arch/s390/include/asm/ccwgroup.h | 73
-rw-r--r-- kernel/arch/s390/include/asm/checksum.h | 140
-rw-r--r-- kernel/arch/s390/include/asm/chpid.h | 50
-rw-r--r-- kernel/arch/s390/include/asm/cio.h | 315
-rw-r--r-- kernel/arch/s390/include/asm/clp.h | 28
-rw-r--r-- kernel/arch/s390/include/asm/cmb.h | 12
-rw-r--r-- kernel/arch/s390/include/asm/cmpxchg.h | 91
-rw-r--r-- kernel/arch/s390/include/asm/compat.h | 359
-rw-r--r-- kernel/arch/s390/include/asm/cpcmd.h | 32
-rw-r--r-- kernel/arch/s390/include/asm/cpu.h | 26
-rw-r--r-- kernel/arch/s390/include/asm/cpu_mf.h | 297
-rw-r--r-- kernel/arch/s390/include/asm/cputime.h | 148
-rw-r--r-- kernel/arch/s390/include/asm/crw.h | 69
-rw-r--r-- kernel/arch/s390/include/asm/css_chars.h | 38
-rw-r--r-- kernel/arch/s390/include/asm/ctl_reg.h | 72
-rw-r--r-- kernel/arch/s390/include/asm/current.h | 18
-rw-r--r-- kernel/arch/s390/include/asm/debug.h | 262
-rw-r--r-- kernel/arch/s390/include/asm/delay.h | 24
-rw-r--r-- kernel/arch/s390/include/asm/device.h | 7
-rw-r--r-- kernel/arch/s390/include/asm/diag.h | 52
-rw-r--r-- kernel/arch/s390/include/asm/dis.h | 53
-rw-r--r-- kernel/arch/s390/include/asm/div64.h | 1
-rw-r--r-- kernel/arch/s390/include/asm/dma-mapping.h | 90
-rw-r--r-- kernel/arch/s390/include/asm/dma.h | 19
-rw-r--r-- kernel/arch/s390/include/asm/eadm.h | 117
-rw-r--r-- kernel/arch/s390/include/asm/ebcdic.h | 48
-rw-r--r-- kernel/arch/s390/include/asm/elf.h | 227
-rw-r--r-- kernel/arch/s390/include/asm/emergency-restart.h | 6
-rw-r--r-- kernel/arch/s390/include/asm/etr.h | 256
-rw-r--r-- kernel/arch/s390/include/asm/exec.h | 12
-rw-r--r-- kernel/arch/s390/include/asm/extmem.h | 31
-rw-r--r-- kernel/arch/s390/include/asm/facility.h | 67
-rw-r--r-- kernel/arch/s390/include/asm/fb.h | 12
-rw-r--r-- kernel/arch/s390/include/asm/fcx.h | 311
-rw-r--r-- kernel/arch/s390/include/asm/ftrace.h | 84
-rw-r--r-- kernel/arch/s390/include/asm/futex.h | 96
-rw-r--r-- kernel/arch/s390/include/asm/hardirq.h | 26
-rw-r--r-- kernel/arch/s390/include/asm/hugetlb.h | 115
-rw-r--r-- kernel/arch/s390/include/asm/hw_irq.h | 11
-rw-r--r-- kernel/arch/s390/include/asm/idals.h | 232
-rw-r--r-- kernel/arch/s390/include/asm/idle.h | 27
-rw-r--r-- kernel/arch/s390/include/asm/io.h | 77
-rw-r--r-- kernel/arch/s390/include/asm/ipl.h | 182
-rw-r--r-- kernel/arch/s390/include/asm/irq.h | 106
-rw-r--r-- kernel/arch/s390/include/asm/irq_regs.h | 1
-rw-r--r-- kernel/arch/s390/include/asm/irqflags.h | 72
-rw-r--r-- kernel/arch/s390/include/asm/isc.h | 28
-rw-r--r-- kernel/arch/s390/include/asm/itcw.h | 30
-rw-r--r-- kernel/arch/s390/include/asm/jump_label.h | 37
-rw-r--r-- kernel/arch/s390/include/asm/kdebug.h | 27
-rw-r--r-- kernel/arch/s390/include/asm/kexec.h | 66
-rw-r--r-- kernel/arch/s390/include/asm/kmap_types.h | 6
-rw-r--r-- kernel/arch/s390/include/asm/kprobes.h | 94
-rw-r--r-- kernel/arch/s390/include/asm/kvm_host.h | 642
-rw-r--r-- kernel/arch/s390/include/asm/kvm_para.h | 157
-rw-r--r-- kernel/arch/s390/include/asm/linkage.h | 9
-rw-r--r-- kernel/arch/s390/include/asm/livepatch.h | 43
-rw-r--r-- kernel/arch/s390/include/asm/local.h | 1
-rw-r--r-- kernel/arch/s390/include/asm/local64.h | 1
-rw-r--r-- kernel/arch/s390/include/asm/lowcore.h | 217
-rw-r--r-- kernel/arch/s390/include/asm/mathemu.h | 28
-rw-r--r-- kernel/arch/s390/include/asm/mman.h | 15
-rw-r--r-- kernel/arch/s390/include/asm/mmu.h | 44
-rw-r--r-- kernel/arch/s390/include/asm/mmu_context.h | 133
-rw-r--r-- kernel/arch/s390/include/asm/module.h | 34
-rw-r--r-- kernel/arch/s390/include/asm/mutex.h | 9
-rw-r--r-- kernel/arch/s390/include/asm/nmi.h | 66
-rw-r--r-- kernel/arch/s390/include/asm/os_info.h | 49
-rw-r--r-- kernel/arch/s390/include/asm/page.h | 156
-rw-r--r-- kernel/arch/s390/include/asm/pci.h | 191
-rw-r--r-- kernel/arch/s390/include/asm/pci_clp.h | 186
-rw-r--r-- kernel/arch/s390/include/asm/pci_debug.h | 28
-rw-r--r-- kernel/arch/s390/include/asm/pci_dma.h | 196
-rw-r--r-- kernel/arch/s390/include/asm/pci_insn.h | 86
-rw-r--r-- kernel/arch/s390/include/asm/pci_io.h | 201
-rw-r--r-- kernel/arch/s390/include/asm/percpu.h | 186
-rw-r--r-- kernel/arch/s390/include/asm/perf_event.h | 93
-rw-r--r-- kernel/arch/s390/include/asm/pgalloc.h | 132
-rw-r--r-- kernel/arch/s390/include/asm/pgtable.h | 1637
-rw-r--r-- kernel/arch/s390/include/asm/processor.h | 360
-rw-r--r-- kernel/arch/s390/include/asm/ptrace.h | 177
-rw-r--r-- kernel/arch/s390/include/asm/qdio.h | 430
-rw-r--r-- kernel/arch/s390/include/asm/reset.h | 20
-rw-r--r-- kernel/arch/s390/include/asm/runtime_instr.h | 90
-rw-r--r-- kernel/arch/s390/include/asm/rwsem.h | 237
-rw-r--r-- kernel/arch/s390/include/asm/schid.h | 21
-rw-r--r-- kernel/arch/s390/include/asm/sclp.h | 78
-rw-r--r-- kernel/arch/s390/include/asm/scsw.h | 988
-rw-r--r-- kernel/arch/s390/include/asm/seccomp.h | 16
-rw-r--r-- kernel/arch/s390/include/asm/sections.h | 8
-rw-r--r-- kernel/arch/s390/include/asm/segment.h | 4
-rw-r--r-- kernel/arch/s390/include/asm/serial.h | 6
-rw-r--r-- kernel/arch/s390/include/asm/setup.h | 119
-rw-r--r-- kernel/arch/s390/include/asm/sfp-machine.h | 142
-rw-r--r-- kernel/arch/s390/include/asm/sfp-util.h | 67
-rw-r--r-- kernel/arch/s390/include/asm/shmparam.h | 11
-rw-r--r-- kernel/arch/s390/include/asm/signal.h | 25
-rw-r--r-- kernel/arch/s390/include/asm/sigp.h | 57
-rw-r--r-- kernel/arch/s390/include/asm/smp.h | 80
-rw-r--r-- kernel/arch/s390/include/asm/sparsemem.h | 7
-rw-r--r-- kernel/arch/s390/include/asm/spinlock.h | 280
-rw-r--r-- kernel/arch/s390/include/asm/spinlock_types.h | 21
-rw-r--r-- kernel/arch/s390/include/asm/string.h | 142
-rw-r--r-- kernel/arch/s390/include/asm/switch_to.h | 175
-rw-r--r-- kernel/arch/s390/include/asm/syscall.h | 100
-rw-r--r-- kernel/arch/s390/include/asm/sysinfo.h | 179
-rw-r--r-- kernel/arch/s390/include/asm/termios.h | 25
-rw-r--r-- kernel/arch/s390/include/asm/thread_info.h | 99
-rw-r--r-- kernel/arch/s390/include/asm/timex.h | 163
-rw-r--r-- kernel/arch/s390/include/asm/tlb.h | 148
-rw-r--r-- kernel/arch/s390/include/asm/tlbflush.h | 204
-rw-r--r-- kernel/arch/s390/include/asm/topology.h | 56
-rw-r--r-- kernel/arch/s390/include/asm/types.h | 11
-rw-r--r-- kernel/arch/s390/include/asm/uaccess.h | 382
-rw-r--r-- kernel/arch/s390/include/asm/unaligned.h | 13
-rw-r--r-- kernel/arch/s390/include/asm/unistd.h | 49
-rw-r--r-- kernel/arch/s390/include/asm/uprobes.h | 42
-rw-r--r-- kernel/arch/s390/include/asm/user.h | 74
-rw-r--r-- kernel/arch/s390/include/asm/vdso.h | 49
-rw-r--r-- kernel/arch/s390/include/asm/vga.h | 6
-rw-r--r-- kernel/arch/s390/include/asm/vtime.h | 7
-rw-r--r-- kernel/arch/s390/include/asm/vtimer.h | 31
-rw-r--r-- kernel/arch/s390/include/asm/xor.h | 1
135 files changed, 15780 insertions(+), 0 deletions(-)
diff --git a/kernel/arch/s390/include/asm/Kbuild b/kernel/arch/s390/include/asm/Kbuild
new file mode 100644
index 000000000..c631f98fd
--- /dev/null
+++ b/kernel/arch/s390/include/asm/Kbuild
@@ -0,0 +1,8 @@
+
+
+generic-y += clkdev.h
+generic-y += irq_work.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += scatterlist.h
+generic-y += trace_clock.h
diff --git a/kernel/arch/s390/include/asm/airq.h b/kernel/arch/s390/include/asm/airq.h
new file mode 100644
index 000000000..bd93ff666
--- /dev/null
+++ b/kernel/arch/s390/include/asm/airq.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright IBM Corp. 2002, 2007
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Arnd Bergmann <arndb@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_AIRQ_H
+#define _ASM_S390_AIRQ_H
+
+#include <linux/bit_spinlock.h>
+
+struct airq_struct {
+ struct hlist_node list; /* Handler queueing. */
+ void (*handler)(struct airq_struct *); /* Thin-interrupt handler */
+ u8 *lsi_ptr; /* Local-Summary-Indicator pointer */
+ u8 lsi_mask; /* Local-Summary-Indicator mask */
+ u8 isc; /* Interrupt-subclass */
+ u8 flags;
+};
+
+#define AIRQ_PTR_ALLOCATED 0x01
+
+int register_adapter_interrupt(struct airq_struct *airq);
+void unregister_adapter_interrupt(struct airq_struct *airq);
+
+/* Adapter interrupt bit vector */
+struct airq_iv {
+ unsigned long *vector; /* Adapter interrupt bit vector */
+ unsigned long *avail; /* Allocation bit mask for the bit vector */
+ unsigned long *bitlock; /* Lock bit mask for the bit vector */
+ unsigned long *ptr; /* Pointer associated with each bit */
+ unsigned int *data; /* 32 bit value associated with each bit */
+ unsigned long bits; /* Number of bits in the vector */
+ unsigned long end; /* Number of highest allocated bit + 1 */
+ spinlock_t lock; /* Lock to protect alloc & free */
+};
+
+#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
+#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
+#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
+#define AIRQ_IV_DATA 8 /* Allocate the data array */
+
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
+void airq_iv_release(struct airq_iv *iv);
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+ unsigned long end);
+
+static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+{
+ return airq_iv_alloc(iv, 1);
+}
+
+static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+{
+ airq_iv_free(iv, bit, 1);
+}
+
+static inline unsigned long airq_iv_end(struct airq_iv *iv)
+{
+ return iv->end;
+}
+
+static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit)
+{
+ const unsigned long be_to_le = BITS_PER_LONG - 1;
+ bit_spin_lock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit)
+{
+ const unsigned long be_to_le = BITS_PER_LONG - 1;
+ bit_spin_unlock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit,
+ unsigned int data)
+{
+ iv->data[bit] = data;
+}
+
+static inline unsigned int airq_iv_get_data(struct airq_iv *iv,
+ unsigned long bit)
+{
+ return iv->data[bit];
+}
+
+static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit,
+ unsigned long ptr)
+{
+ iv->ptr[bit] = ptr;
+}
+
+static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv,
+ unsigned long bit)
+{
+ return iv->ptr[bit];
+}
+
+#endif /* _ASM_S390_AIRQ_H */
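The airq_iv interface above pairs a summary-indicator handler with a per-bit interrupt vector: a driver allocates indicator bits, optionally attaches a 32-bit token to each, and walks the pending bits from its thin-interrupt handler. A minimal usage sketch, not part of this patch (the names, vector size, and token value are invented; error paths trimmed):

    /* Hypothetical consumer of the airq_iv API shown above. */
    #include <linux/errno.h>
    #include <asm/airq.h>

    static struct airq_iv *iv;

    static void my_thin_handler(struct airq_struct *airq)
    {
            unsigned long bit = 0;
            unsigned int token;

            /* airq_iv_scan() clears each pending bit it returns; -1UL ends the walk. */
            while ((bit = airq_iv_scan(iv, bit, airq_iv_end(iv))) != -1UL) {
                    token = airq_iv_get_data(iv, bit);
                    /* ... dispatch on token ... */
            }
    }

    static int my_setup(void)
    {
            unsigned long bit;

            iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_DATA);
            if (!iv)
                    return -ENOMEM;
            bit = airq_iv_alloc_bit(iv);    /* reserve one indicator bit */
            airq_iv_set_data(iv, bit, 42);  /* token later read by the handler */
            return 0;
    }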
diff --git a/kernel/arch/s390/include/asm/appldata.h b/kernel/arch/s390/include/asm/appldata.h
new file mode 100644
index 000000000..16887c5fd
--- /dev/null
+++ b/kernel/arch/s390/include/asm/appldata.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright IBM Corp. 2006
+ *
+ * Author(s): Melissa Howland <melissah@us.ibm.com>
+ */
+
+#ifndef _ASM_S390_APPLDATA_H
+#define _ASM_S390_APPLDATA_H
+
+#include <asm/io.h>
+
+#define APPLDATA_START_INTERVAL_REC 0x80
+#define APPLDATA_STOP_REC 0x81
+#define APPLDATA_GEN_EVENT_REC 0x82
+#define APPLDATA_START_CONFIG_REC 0x83
+
+/*
+ * Parameter list for DIAGNOSE X'DC'
+ */
+struct appldata_parameter_list {
+ u16 diag;
+ u8 function;
+ u8 parlist_length;
+ u32 unused01;
+ u16 reserved;
+ u16 buffer_length;
+ u32 unused02;
+ u64 product_id_addr;
+ u64 buffer_addr;
+} __attribute__ ((packed));
+
+struct appldata_product_id {
+ char prod_nr[7]; /* product number */
+ u16 prod_fn; /* product function */
+ u8 record_nr; /* record number */
+ u16 version_nr; /* version */
+ u16 release_nr; /* release */
+ u16 mod_lvl; /* modification level */
+} __attribute__ ((packed));
+
+static inline int appldata_asm(struct appldata_product_id *id,
+ unsigned short fn, void *buffer,
+ unsigned short length)
+{
+ struct appldata_parameter_list parm_list;
+ int ry;
+
+ if (!MACHINE_IS_VM)
+ return -EOPNOTSUPP;
+ parm_list.diag = 0xdc;
+ parm_list.function = fn;
+ parm_list.parlist_length = sizeof(parm_list);
+ parm_list.buffer_length = length;
+ parm_list.product_id_addr = (unsigned long) id;
+ parm_list.buffer_addr = virt_to_phys(buffer);
+ asm volatile(
+ " diag %1,%0,0xdc"
+ : "=d" (ry)
+ : "d" (&parm_list), "m" (parm_list), "m" (*id)
+ : "cc");
+ return ry;
+}
+
+#endif /* _ASM_S390_APPLDATA_H */
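appldata_asm() above is the whole interface: it builds the DIAGNOSE X'DC' parameter list on the stack and points it at the caller's product id and sample buffer. A hedged call sketch (all field values are invented placeholders; real callers fill prod_nr with an EBCDIC product name):

    /* Hypothetical caller of appldata_asm(); values are placeholders only. */
    #include <asm/appldata.h>

    static struct appldata_product_id my_id = {
            .prod_nr    = { 0 },    /* product number, EBCDIC (invented) */
            .prod_fn    = 0,
            .record_nr  = 1,
            .version_nr = 1,
            .release_nr = 0,
    };

    static int my_start_sampling(void *buf, unsigned short len)
    {
            /* Ask z/VM to start producing interval records into buf. */
            return appldata_asm(&my_id, APPLDATA_START_INTERVAL_REC, buf, len);
    }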
diff --git a/kernel/arch/s390/include/asm/asm-offsets.h b/kernel/arch/s390/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/kernel/arch/s390/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/kernel/arch/s390/include/asm/atomic.h b/kernel/arch/s390/include/asm/atomic.h
new file mode 100644
index 000000000..adbe3802e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/atomic.h
@@ -0,0 +1,320 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Denis Joseph Barrow,
+ * Arnd Bergmann <arndb@de.ibm.com>,
+ *
+ * Atomic operations that C can't guarantee us.
+ * Useful for resource counting etc.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
+ *
+ */
+
+#ifndef __ARCH_S390_ATOMIC__
+#define __ARCH_S390_ATOMIC__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define __ATOMIC_NO_BARRIER "\n"
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR "lao"
+#define __ATOMIC_AND "lan"
+#define __ATOMIC_ADD "laa"
+#define __ATOMIC_BARRIER "bcr 14,0\n"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
+ int old_val; \
+ \
+ typecheck(atomic_t *, ptr); \
+ asm volatile( \
+ __barrier \
+ op_string " %0,%2,%1\n" \
+ __barrier \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR "or"
+#define __ATOMIC_AND "nr"
+#define __ATOMIC_ADD "ar"
+#define __ATOMIC_BARRIER "\n"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
+ int old_val, new_val; \
+ \
+ typecheck(atomic_t *, ptr); \
+ asm volatile( \
+ " l %0,%2\n" \
+ "0: lr %1,%0\n" \
+ op_string " %1,%3\n" \
+ " cs %0,%1,%2\n" \
+ " jl 0b" \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int atomic_read(const atomic_t *v)
+{
+ int c;
+
+ asm volatile(
+ " l %0,%1\n"
+ : "=d" (c) : "Q" (v->counter));
+ return c;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ asm volatile(
+ " st %1,%0\n"
+ : "=Q" (v->counter) : "d" (i));
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "asi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ return;
+ }
+#endif
+ __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+}
+
+#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v) atomic_add(1, _v)
+#define atomic_inc_return(_v) atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
+#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v) atomic_sub(1, _v)
+#define atomic_dec_return(_v) atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ asm volatile(
+ " cs %0,%2,%1"
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
+ : "cc", "memory");
+ return old;
+}
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic_cmpxchg(v, c, c + a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c;
+}
+
+
+#undef __ATOMIC_LOOP
+
+#define ATOMIC64_INIT(i) { (i) }
+
+#define __ATOMIC64_NO_BARRIER "\n"
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR "laog"
+#define __ATOMIC64_AND "lang"
+#define __ATOMIC64_ADD "laag"
+#define __ATOMIC64_BARRIER "bcr 14,0\n"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
+ long long old_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
+ asm volatile( \
+ __barrier \
+ op_string " %0,%2,%1\n" \
+ __barrier \
+ : "=d" (old_val), "+Q" ((ptr)->counter) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR "ogr"
+#define __ATOMIC64_AND "ngr"
+#define __ATOMIC64_ADD "agr"
+#define __ATOMIC64_BARRIER "\n"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
+ long long old_val, new_val; \
+ \
+ typecheck(atomic64_t *, ptr); \
+ asm volatile( \
+ " lg %0,%2\n" \
+ "0: lgr %1,%0\n" \
+ op_string " %1,%3\n" \
+ " csg %0,%1,%2\n" \
+ " jl 0b" \
+ : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+ long long c;
+
+ asm volatile(
+ " lg %0,%1\n"
+ : "=d" (c) : "Q" (v->counter));
+ return c;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ asm volatile(
+ " stg %1,%0\n"
+ : "=Q" (v->counter) : "d" (i));
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "agsi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ return;
+ }
+#endif
+ __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+}
+
+static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
+{
+ __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
+}
+
+static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
+{
+ __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
+}
+
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline long long atomic64_cmpxchg(atomic64_t *v,
+ long long old, long long new)
+{
+ asm volatile(
+ " csg %0,%2,%1"
+ : "+d" (old), "+Q" (v->counter)
+ : "d" (new)
+ : "cc", "memory");
+ return old;
+}
+
+#undef __ATOMIC64_LOOP
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+ long long c, old;
+
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic64_cmpxchg(v, c, c + i);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
+#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v) atomic64_add(1, _v)
+#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
+#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v) atomic64_sub(1, _v)
+#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#endif /* __ARCH_S390_ATOMIC__ */
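On machines without the z196 interlocked-access instructions, every read-modify-write above turns into the load / modify / compare-and-swap retry loop of __ATOMIC_LOOP, and __atomic_add_unless() repeats the same shape in C on top of atomic_cmpxchg(). The same retry pattern in portable user-space C11, as a sketch rather than this header's code:

    /* User-space analogue of the cmpxchg retry loop in __atomic_add_unless(). */
    #include <stdatomic.h>

    static int add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            while (c != u) {
                    /* Try to install c + a; on failure c is reloaded for the retry. */
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            break;
            }
            return c;       /* old value; equals u if nothing was added */
    }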
diff --git a/kernel/arch/s390/include/asm/barrier.h b/kernel/arch/s390/include/asm/barrier.h
new file mode 100644
index 000000000..8d724718e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/barrier.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+/* Fast-BCR without checkpoint synchronization */
+#define __ASM_BARRIER "bcr 14,0\n"
+#else
+#define __ASM_BARRIER "bcr 15,0\n"
+#endif
+
+#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
+
+#define rmb() mb()
+#define wmb() mb()
+#define dma_rmb() rmb()
+#define dma_wmb() wmb()
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
+#endif /* __ASM_BARRIER_H */
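On s390 a release store and an acquire load need no hardware fence, so smp_store_release()/smp_load_acquire() above compile down to a compiler barrier plus a plain access; what the pair provides is the usual publish/consume ordering. The same pattern in portable C11, as a user-space sketch (in kernel code the identical shape uses the two macros above; the globals are illustrative):

    /* Publish/consume ordering that smp_store_release()/smp_load_acquire() give. */
    #include <stdatomic.h>

    struct msg { int payload; };
    static struct msg m;
    static struct msg *_Atomic ready;

    /* Producer: the release store orders the payload write before the publish. */
    static void publish(void)
    {
            m.payload = 42;
            atomic_store_explicit(&ready, &m, memory_order_release);
    }

    /* Consumer: the acquire load orders the pointer read before the payload read. */
    static int consume(void)
    {
            struct msg *p = atomic_load_explicit(&ready, memory_order_acquire);

            return p ? p->payload : -1;
    }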
diff --git a/kernel/arch/s390/include/asm/bitops.h b/kernel/arch/s390/include/asm/bitops.h
new file mode 100644
index 000000000..9b68e98a7
--- /dev/null
+++ b/kernel/arch/s390/include/asm/bitops.h
@@ -0,0 +1,454 @@
+/*
+ * Copyright IBM Corp. 1999,2013
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on s390:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit arrays layouts, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
+ * number field needs to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
+ * number field needs to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
+ *
+ */
+
+#ifndef _S390_BITOPS_H
+#define _S390_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/typecheck.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+#define __BITOPS_NO_BARRIER "\n"
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __BITOPS_OR "laog"
+#define __BITOPS_AND "lang"
+#define __BITOPS_XOR "laxg"
+#define __BITOPS_BARRIER "bcr 14,0\n"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
+({ \
+ unsigned long __old; \
+ \
+ typecheck(unsigned long *, (__addr)); \
+ asm volatile( \
+ __barrier \
+ __op_string " %0,%2,%1\n" \
+ __barrier \
+ : "=d" (__old), "+Q" (*(__addr)) \
+ : "d" (__val) \
+ : "cc", "memory"); \
+ __old; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __BITOPS_OR "ogr"
+#define __BITOPS_AND "ngr"
+#define __BITOPS_XOR "xgr"
+#define __BITOPS_BARRIER "\n"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
+({ \
+ unsigned long __old, __new; \
+ \
+ typecheck(unsigned long *, (__addr)); \
+ asm volatile( \
+ " lg %0,%2\n" \
+ "0: lgr %1,%0\n" \
+ __op_string " %1,%3\n" \
+ " csg %0,%1,%2\n" \
+ " jl 0b" \
+ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+ : "d" (__val) \
+ : "cc", "memory"); \
+ __old; \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+static inline unsigned long *
+__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+ return (unsigned long *)addr;
+}
+
+static inline unsigned char *
+__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
+{
+ return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+}
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
+
+ asm volatile(
+ "oi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc", "memory");
+ return;
+ }
+#endif
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
+ __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
+
+ asm volatile(
+ "ni %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (~(1 << (nr & 7)))
+ : "cc", "memory");
+ return;
+ }
+#endif
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
+ __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ if (__builtin_constant_p(nr)) {
+ unsigned char *caddr = __bitops_byte(nr, ptr);
+
+ asm volatile(
+ "xi %0,%b1\n"
+ : "+Q" (*caddr)
+ : "i" (1 << (nr & 7))
+ : "cc", "memory");
+ return;
+ }
+#endif
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
+ __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+}
+
+static inline int
+test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
+
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+ return (old & mask) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
+
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+ return (old & ~mask) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long old, mask;
+
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+ return (old & mask) != 0;
+}
+
+static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned char *addr = __bitops_byte(nr, ptr);
+
+ *addr |= 1 << (nr & 7);
+}
+
+static inline void
+__clear_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned char *addr = __bitops_byte(nr, ptr);
+
+ *addr &= ~(1 << (nr & 7));
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned char *addr = __bitops_byte(nr, ptr);
+
+ *addr ^= 1 << (nr & 7);
+}
+
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned char ch;
+
+ ch = *addr;
+ *addr |= 1 << (nr & 7);
+ return (ch >> (nr & 7)) & 1;
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned char ch;
+
+ ch = *addr;
+ *addr &= ~(1 << (nr & 7));
+ return (ch >> (nr & 7)) & 1;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+ unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned char ch;
+
+ ch = *addr;
+ *addr ^= 1 << (nr & 7);
+ return (ch >> (nr & 7)) & 1;
+}
+
+static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
+{
+ const volatile unsigned char *addr;
+
+ addr = ((const volatile unsigned char *)ptr);
+ addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
+ return (*addr >> (nr & 7)) & 1;
+}
+
+/*
+ * Functions which use MSB0 bit numbering.
+ * On an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ */
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+ unsigned long offset);
+
+static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+{
+ return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+{
+ return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+{
+ return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+{
+ return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+static inline int test_bit_inv(unsigned long nr,
+ const volatile unsigned long *ptr)
+{
+ return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+
+/**
+ * __flogr - find leftmost one
+ * @word - The word to search
+ *
+ * Returns the bit number of the most significant bit set,
+ * where the most significant bit has bit number 0.
+ * If no bit is set this function returns 64.
+ */
+static inline unsigned char __flogr(unsigned long word)
+{
+ if (__builtin_constant_p(word)) {
+ unsigned long bit = 0;
+
+ if (!word)
+ return 64;
+ if (!(word & 0xffffffff00000000UL)) {
+ word <<= 32;
+ bit += 32;
+ }
+ if (!(word & 0xffff000000000000UL)) {
+ word <<= 16;
+ bit += 16;
+ }
+ if (!(word & 0xff00000000000000UL)) {
+ word <<= 8;
+ bit += 8;
+ }
+ if (!(word & 0xf000000000000000UL)) {
+ word <<= 4;
+ bit += 4;
+ }
+ if (!(word & 0xc000000000000000UL)) {
+ word <<= 2;
+ bit += 2;
+ }
+ if (!(word & 0x8000000000000000UL)) {
+ word <<= 1;
+ bit += 1;
+ }
+ return bit;
+ } else {
+ register unsigned long bit asm("4") = word;
+ register unsigned long out asm("5");
+
+ asm volatile(
+ " flogr %[bit],%[bit]\n"
+ : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+ return bit;
+ }
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
+}
+
+/**
+ * ffs - find first bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as the libc and
+ * compiler builtin ffs routines (man ffs).
+ */
+static inline int ffs(int word)
+{
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
+ unsigned int val = (unsigned int)word;
+
+ return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
+}
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ return __flogr(word) ^ (BITS_PER_LONG - 1);
+}
+
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @word: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+static inline int fls64(unsigned long word)
+{
+ unsigned long mask = 2 * BITS_PER_LONG - 1;
+
+ return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
+}
+
+/**
+ * fls - find last (most-significant) bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static inline int fls(int word)
+{
+ return fls64((unsigned int)word);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+
+#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* _S390_BITOPS_H */
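Every numbering conversion in this header is a single XOR: LSB0 bit nr maps to MSB0 bit nr ^ (BITS_PER_LONG - 1), and to its byte within the big-endian word via (nr ^ (BITS_PER_LONG - 8)) >> 3, exactly as __bitops_byte() computes. A small self-contained user-space check of the mapping (assumes 64-bit longs):

    /* Demonstrates the XOR-based bit-number conversions used above. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long nr;

            for (nr = 0; nr < 64; nr += 21) {
                    printf("LSB0 bit %2lu -> MSB0 bit %2lu, byte %lu, in-byte bit %lu\n",
                           nr, nr ^ 63, (nr ^ 56) >> 3, nr & 7);
            }
            return 0;       /* e.g. LSB0 bit 0 lives in byte 7 of the BE word */
    }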
diff --git a/kernel/arch/s390/include/asm/bug.h b/kernel/arch/s390/include/asm/bug.h
new file mode 100644
index 000000000..bf90d1fd9
--- /dev/null
+++ b/kernel/arch/s390/include/asm/bug.h
@@ -0,0 +1,71 @@
+#ifndef _ASM_S390_BUG_H
+#define _ASM_S390_BUG_H
+
+#include <linux/kernel.h>
+
+#ifdef CONFIG_BUG
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define __EMIT_BUG(x) do { \
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+ ".section .rodata.str,\"aMS\",@progbits,1\n" \
+ "2: .asciz \""__FILE__"\"\n" \
+ ".previous\n" \
+ ".section __bug_table,\"a\"\n" \
+ "3: .long 1b-3b,2b-3b\n" \
+ " .short %0,%1\n" \
+ " .org 3b+%2\n" \
+ ".previous\n" \
+ : : "i" (__LINE__), \
+ "i" (x), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+
+#else /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define __EMIT_BUG(x) do { \
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+ ".section __bug_table,\"a\"\n" \
+ "2: .long 1b-2b\n" \
+ " .short %0\n" \
+ " .org 2b+%1\n" \
+ ".previous\n" \
+ : : "i" (x), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define BUG() do { \
+ __EMIT_BUG(0); \
+ unreachable(); \
+} while (0)
+
+#define __WARN_TAINT(taint) do { \
+ __EMIT_BUG(BUGFLAG_TAINT(taint)); \
+} while (0)
+
+#define WARN_ON(x) ({ \
+ int __ret_warn_on = !!(x); \
+ if (__builtin_constant_p(__ret_warn_on)) { \
+ if (__ret_warn_on) \
+ __WARN(); \
+ } else { \
+ if (unlikely(__ret_warn_on)) \
+ __WARN(); \
+ } \
+ unlikely(__ret_warn_on); \
+})
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
+#endif /* CONFIG_BUG */
+
+#include <asm-generic/bug.h>
+
+#endif /* _ASM_S390_BUG_H */
diff --git a/kernel/arch/s390/include/asm/bugs.h b/kernel/arch/s390/include/asm/bugs.h
new file mode 100644
index 000000000..0f5bd894f
--- /dev/null
+++ b/kernel/arch/s390/include/asm/bugs.h
@@ -0,0 +1,20 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/bugs.h"
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+
+static inline void check_bugs(void)
+{
+ /* s390 has no bugs ... */
+}
diff --git a/kernel/arch/s390/include/asm/cache.h b/kernel/arch/s390/include/asm/cache.h
new file mode 100644
index 000000000..4d7ccac5f
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cache.h
@@ -0,0 +1,18 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ *
+ * Derived from "include/asm-i386/cache.h"
+ * Copyright (C) 1992, Linus Torvalds
+ */
+
+#ifndef __ARCH_S390_CACHE_H
+#define __ARCH_S390_CACHE_H
+
+#define L1_CACHE_BYTES 256
+#define L1_CACHE_SHIFT 8
+#define NET_SKB_PAD 32
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#endif
diff --git a/kernel/arch/s390/include/asm/cacheflush.h b/kernel/arch/s390/include/asm/cacheflush.h
new file mode 100644
index 000000000..58fae7d09
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cacheflush.h
@@ -0,0 +1,12 @@
+#ifndef _S390_CACHEFLUSH_H
+#define _S390_CACHEFLUSH_H
+
+/* Caches aren't brain-dead on the s390. */
+#include <asm-generic/cacheflush.h>
+
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+
+#endif /* _S390_CACHEFLUSH_H */
diff --git a/kernel/arch/s390/include/asm/ccwdev.h b/kernel/arch/s390/include/asm/ccwdev.h
new file mode 100644
index 000000000..b80e456d6
--- /dev/null
+++ b/kernel/arch/s390/include/asm/ccwdev.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * Interface for CCW device drivers
+ */
+#ifndef _S390_CCWDEV_H_
+#define _S390_CCWDEV_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <asm/fcx.h>
+#include <asm/irq.h>
+#include <asm/schid.h>
+
+/* structs from asm/cio.h */
+struct irb;
+struct ccw1;
+struct ccw_dev_id;
+
+/* simplified initializers for struct ccw_device_id:
+ * CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one
+ * entry in your MODULE_DEVICE_TABLE and set the match_flags correctly */
+#define CCW_DEVICE(cu, cum) \
+ .cu_type=(cu), .cu_model=(cum), \
+ .match_flags=(CCW_DEVICE_ID_MATCH_CU_TYPE \
+ | (cum ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0))
+
+#define CCW_DEVICE_DEVTYPE(cu, cum, dev, devm) \
+ .cu_type=(cu), .cu_model=(cum), .dev_type=(dev), .dev_model=(devm),\
+ .match_flags=CCW_DEVICE_ID_MATCH_CU_TYPE \
+ | ((cum) ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0) \
+ | CCW_DEVICE_ID_MATCH_DEVICE_TYPE \
+ | ((devm) ? CCW_DEVICE_ID_MATCH_DEVICE_MODEL : 0)
+
+/* scan through an array of device ids and return the first
+ * entry that matches the device.
+ *
+ * the array must end with an entry containing zero match_flags
+ */
+static inline const struct ccw_device_id *
+ccw_device_id_match(const struct ccw_device_id *array,
+ const struct ccw_device_id *match)
+{
+ const struct ccw_device_id *id = array;
+
+ for (id = array; id->match_flags; id++) {
+ if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_TYPE)
+ && (id->cu_type != match->cu_type))
+ continue;
+
+ if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_MODEL)
+ && (id->cu_model != match->cu_model))
+ continue;
+
+ if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_TYPE)
+ && (id->dev_type != match->dev_type))
+ continue;
+
+ if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_MODEL)
+ && (id->dev_model != match->dev_model))
+ continue;
+
+ return id;
+ }
+
+ return NULL;
+}
+
+/**
+ * struct ccw_device - channel attached device
+ * @ccwlock: pointer to device lock
+ * @id: id of this device
+ * @drv: ccw driver for this device
+ * @dev: embedded device structure
+ * @online: online status of device
+ * @handler: interrupt handler
+ *
+ * @handler is a member of the device rather than the driver since a driver
+ * can have different interrupt handlers for different ccw devices
+ * (multi-subchannel drivers).
+ */
+struct ccw_device {
+ spinlock_t *ccwlock;
+/* private: */
+ struct ccw_device_private *private; /* cio private information */
+/* public: */
+ struct ccw_device_id id;
+ struct ccw_driver *drv;
+ struct device dev;
+ int online;
+ void (*handler) (struct ccw_device *, unsigned long, struct irb *);
+};
+
+/*
+ * Possible events used by the path_event notifier.
+ */
+#define PE_NONE 0x0
+#define PE_PATH_GONE 0x1 /* A path is no longer available. */
+#define PE_PATH_AVAILABLE 0x2 /* A path has become available and
+ was successfully verified. */
+#define PE_PATHGROUP_ESTABLISHED 0x4 /* A pathgroup was reset and had
+ to be established again. */
+
+/*
+ * Possible CIO actions triggered by the unit check handler.
+ */
+enum uc_todo {
+ UC_TODO_RETRY,
+ UC_TODO_RETRY_ON_NEW_PATH,
+ UC_TODO_STOP
+};
+
+/**
+ * struct ccw_driver - device driver for channel attached devices
+ * @ids: ids supported by this driver
+ * @probe: function called on probe
+ * @remove: function called on remove
+ * @set_online: called when setting device online
+ * @set_offline: called when setting device offline
+ * @notify: notify driver of device state changes
+ * @path_event: notify driver of channel path events
+ * @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
+ * @uc_handler: callback for unit check handler
+ * @driver: embedded device driver structure
+ * @int_class: interruption class to use for accounting interrupts
+ */
+struct ccw_driver {
+ struct ccw_device_id *ids;
+ int (*probe) (struct ccw_device *);
+ void (*remove) (struct ccw_device *);
+ int (*set_online) (struct ccw_device *);
+ int (*set_offline) (struct ccw_device *);
+ int (*notify) (struct ccw_device *, int);
+ void (*path_event) (struct ccw_device *, int *);
+ void (*shutdown) (struct ccw_device *);
+ int (*prepare) (struct ccw_device *);
+ void (*complete) (struct ccw_device *);
+ int (*freeze)(struct ccw_device *);
+ int (*thaw) (struct ccw_device *);
+ int (*restore)(struct ccw_device *);
+ enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
+ struct device_driver driver;
+ enum interruption_class int_class;
+};
+
+extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
+ const char *bus_id);
+
+/* device drivers call these during module load and unload.
+ * When a driver is registered, its probe method is called
+ * when new devices for its type pop up */
+extern int ccw_driver_register (struct ccw_driver *driver);
+extern void ccw_driver_unregister (struct ccw_driver *driver);
+
+struct ccw1;
+
+extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
+extern int ccw_device_set_options(struct ccw_device *, unsigned long);
+extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
+int ccw_device_is_pathgroup(struct ccw_device *cdev);
+int ccw_device_is_multipath(struct ccw_device *cdev);
+
+/* Allow for i/o completion notification after primary interrupt status. */
+#define CCWDEV_EARLY_NOTIFICATION 0x0001
+/* Report all interrupt conditions. */
+#define CCWDEV_REPORT_ALL 0x0002
+/* Try to perform path grouping. */
+#define CCWDEV_DO_PATHGROUP 0x0004
+/* Allow forced onlining of boxed devices. */
+#define CCWDEV_ALLOW_FORCE 0x0008
+/* Try to use multipath mode. */
+#define CCWDEV_DO_MULTIPATH 0x0010
+
+extern int ccw_device_start(struct ccw_device *, struct ccw1 *,
+ unsigned long, __u8, unsigned long);
+extern int ccw_device_start_timeout(struct ccw_device *, struct ccw1 *,
+ unsigned long, __u8, unsigned long, int);
+extern int ccw_device_start_key(struct ccw_device *, struct ccw1 *,
+ unsigned long, __u8, __u8, unsigned long);
+extern int ccw_device_start_timeout_key(struct ccw_device *, struct ccw1 *,
+ unsigned long, __u8, __u8,
+ unsigned long, int);
+
+
+extern int ccw_device_resume(struct ccw_device *);
+extern int ccw_device_halt(struct ccw_device *, unsigned long);
+extern int ccw_device_clear(struct ccw_device *, unsigned long);
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+			    unsigned long intparm, u8 lpm, u8 key);
+int ccw_device_tm_start_timeout_key(struct ccw_device *, struct tcw *,
+ unsigned long, u8, u8, int);
+int ccw_device_tm_start(struct ccw_device *, struct tcw *,
+ unsigned long, u8);
+int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *,
+ unsigned long, u8, int);
+int ccw_device_tm_intrg(struct ccw_device *cdev);
+
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);
+
+extern int ccw_device_set_online(struct ccw_device *cdev);
+extern int ccw_device_set_offline(struct ccw_device *cdev);
+
+
+extern struct ciw *ccw_device_get_ciw(struct ccw_device *, __u32 cmd);
+extern __u8 ccw_device_get_path_mask(struct ccw_device *);
+extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
+
+#define get_ccwdev_lock(x) (x)->ccwlock
+
+#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
+#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
+
+extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
+extern void ccw_device_destroy_console(struct ccw_device *);
+extern int ccw_device_enable_console(struct ccw_device *);
+extern void ccw_device_wait_idle(struct ccw_device *);
+extern int ccw_device_force_console(struct ccw_device *);
+
+int ccw_device_siosl(struct ccw_device *);
+
+extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
+
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *, int);
+#endif /* _S390_CCWDEV_H_ */
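A typical consumer of this interface declares an id table with the CCW_DEVICE initializer, registers a struct ccw_driver, and installs its per-device interrupt handler from probe(). A hypothetical skeleton, not taken from any real driver (the control-unit type and all names are invented):

    /* Hypothetical CCW driver skeleton using the interfaces above. */
    #include <linux/module.h>
    #include <asm/ccwdev.h>

    static struct ccw_device_id my_ids[] = {
            { CCW_DEVICE(0x1234, 0) },      /* any model of CU type 0x1234 */
            { /* end of list */ },
    };
    MODULE_DEVICE_TABLE(ccw, my_ids);

    static void my_irq(struct ccw_device *cdev, unsigned long intparm,
                       struct irb *irb)
    {
            /* inspect irb->scsw, complete the pending I/O request, ... */
    }

    static int my_probe(struct ccw_device *cdev)
    {
            cdev->handler = my_irq; /* per device, not per driver */
            return 0;
    }

    static struct ccw_driver my_driver = {
            .driver = {
                    .name  = "my_ccw_drv",
                    .owner = THIS_MODULE,
            },
            .ids   = my_ids,
            .probe = my_probe,
    };

    /* module init/exit would call ccw_driver_register()/ccw_driver_unregister() */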
diff --git a/kernel/arch/s390/include/asm/ccwgroup.h b/kernel/arch/s390/include/asm/ccwgroup.h
new file mode 100644
index 000000000..057ce0ca6
--- /dev/null
+++ b/kernel/arch/s390/include/asm/ccwgroup.h
@@ -0,0 +1,73 @@
+#ifndef S390_CCWGROUP_H
+#define S390_CCWGROUP_H
+
+struct ccw_device;
+struct ccw_driver;
+
+/**
+ * struct ccwgroup_device - ccw group device
+ * @state: online/offline state
+ * @count: number of attached slave devices
+ * @dev: embedded device structure
+ * @cdev: variable number of slave devices, allocated as needed
+ * @ungroup_work: work to be done when a ccwgroup notifier has action
+ * type %BUS_NOTIFY_UNBIND_DRIVER
+ */
+struct ccwgroup_device {
+ enum {
+ CCWGROUP_OFFLINE,
+ CCWGROUP_ONLINE,
+ } state;
+/* private: */
+ atomic_t onoff;
+ struct mutex reg_mutex;
+/* public: */
+ unsigned int count;
+ struct device dev;
+ struct work_struct ungroup_work;
+ struct ccw_device *cdev[0];
+};
+
+/**
+ * struct ccwgroup_driver - driver for ccw group devices
+ * @setup: function called during device creation to setup the device
+ * @remove: function called on remove
+ * @set_online: function called when device is set online
+ * @set_offline: function called when device is set offline
+ * @shutdown: function called when device is shut down
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
+ * @driver: embedded driver structure
+ */
+struct ccwgroup_driver {
+ int (*setup) (struct ccwgroup_device *);
+ void (*remove) (struct ccwgroup_device *);
+ int (*set_online) (struct ccwgroup_device *);
+ int (*set_offline) (struct ccwgroup_device *);
+ void (*shutdown)(struct ccwgroup_device *);
+ int (*prepare) (struct ccwgroup_device *);
+ void (*complete) (struct ccwgroup_device *);
+ int (*freeze)(struct ccwgroup_device *);
+ int (*thaw) (struct ccwgroup_device *);
+ int (*restore)(struct ccwgroup_device *);
+
+ struct device_driver driver;
+};
+
+extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
+extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
+int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
+ int num_devices, const char *buf);
+
+extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
+extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+
+extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
+extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
+
+#define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev)
+#define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver)
+#endif
diff --git a/kernel/arch/s390/include/asm/checksum.h b/kernel/arch/s390/include/asm/checksum.h
new file mode 100644
index 000000000..740364856
--- /dev/null
+++ b/kernel/arch/s390/include/asm/checksum.h
@@ -0,0 +1,140 @@
+/*
+ * S390 fast network checksum routines
+ *
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Ulrich Hild (first version)
+ * Martin Schwidefsky (heavily optimized CKSM version)
+ * D.J. Barrow (third attempt)
+ */
+
+#ifndef _S390_CHECKSUM_H
+#define _S390_CHECKSUM_H
+
+#include <asm/uaccess.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+static inline __wsum
+csum_partial(const void *buff, int len, __wsum sum)
+{
+ register unsigned long reg2 asm("2") = (unsigned long) buff;
+ register unsigned long reg3 asm("3") = (unsigned long) len;
+
+ asm volatile(
+ "0: cksm %0,%1\n" /* do checksum on longs */
+ " jo 0b\n"
+ : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
+ return sum;
+}
+
+/*
+ * The same as csum_partial_copy(), but copies from user space.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary.
+ */
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum,
+ int *err_ptr)
+{
+ if (unlikely(copy_from_user(dst, src, len)))
+ *err_ptr = -EFAULT;
+ return csum_partial(dst, len, sum);
+}
+
+
+static inline __wsum
+csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
+{
+ memcpy(dst,src,len);
+ return csum_partial(dst, len, sum);
+}
+
+/*
+ * Fold a partial checksum without adding pseudo headers
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ u32 csum = (__force u32) sum;
+
+ csum += (csum >> 16) + (csum << 16);
+ csum >>= 16;
+ return (__force __sum16) ~csum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ return csum_fold(csum_partial(iph, ihl*4, 0));
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 32-bit checksum
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len, unsigned short proto,
+ __wsum sum)
+{
+ __u32 csum = (__force __u32)sum;
+
+ csum += (__force __u32)saddr;
+ if (csum < (__force __u32)saddr)
+ csum++;
+
+ csum += (__force __u32)daddr;
+ if (csum < (__force __u32)daddr)
+ csum++;
+
+ csum += len + proto;
+ if (csum < len + proto)
+ csum++;
+
+ return (__force __wsum)csum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len, unsigned short proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#endif /* _S390_CHECKSUM_H */
+
+
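The non-obvious part above is csum_fold(): adding (csum >> 16) + (csum << 16) to csum performs the 16-bit fold and the end-around carry in a single 32-bit add, leaving the folded sum in the upper halfword. A user-space check with arbitrary values:

    /* Verifies the csum_fold() folding trick from this header. */
    #include <stdio.h>
    #include <stdint.h>

    static uint16_t fold(uint32_t csum)
    {
            csum += (csum >> 16) + (csum << 16);    /* both halves + carry -> bits 31..16 */
            csum >>= 16;
            return (uint16_t)~csum;
    }

    int main(void)
    {
            printf("fold(0x87654321) = 0x%04x\n", fold(0x87654321)); /* 0x3579 */
            /* 0xffff + 0x0001 wraps, so this exercises the end-around carry: */
            printf("fold(0xffff0001) = 0x%04x\n", fold(0xffff0001)); /* 0xfffe */
            return 0;
    }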
diff --git a/kernel/arch/s390/include/asm/chpid.h b/kernel/arch/s390/include/asm/chpid.h
new file mode 100644
index 000000000..7298eec98
--- /dev/null
+++ b/kernel/arch/s390/include/asm/chpid.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright IBM Corp. 2007, 2012
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+#ifndef _ASM_S390_CHPID_H
+#define _ASM_S390_CHPID_H
+
+#include <uapi/asm/chpid.h>
+#include <asm/cio.h>
+
+struct channel_path_desc {
+ u8 flags;
+ u8 lsn;
+ u8 desc;
+ u8 chpid;
+ u8 swla;
+ u8 zeroes;
+ u8 chla;
+ u8 chpp;
+} __packed;
+
+static inline void chp_id_init(struct chp_id *chpid)
+{
+ memset(chpid, 0, sizeof(struct chp_id));
+}
+
+static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b)
+{
+ return (a->id == b->id) && (a->cssid == b->cssid);
+}
+
+static inline void chp_id_next(struct chp_id *chpid)
+{
+ if (chpid->id < __MAX_CHPID)
+ chpid->id++;
+ else {
+ chpid->id = 0;
+ chpid->cssid++;
+ }
+}
+
+static inline int chp_id_is_valid(struct chp_id *chpid)
+{
+ return (chpid->cssid <= __MAX_CSSID);
+}
+
+
+#define chp_id_for_each(c) \
+ for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))
+#endif /* _ASM_S390_CHPID_H */
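chp_id_for_each() above enumerates every possible channel path: ids 0..__MAX_CHPID within a css, then the next cssid, ending once cssid exceeds __MAX_CSSID. A usage sketch (hypothetical caller):

    /* Hypothetical walk over all possible channel-path ids. */
    #include <asm/chpid.h>

    static void scan_all_chpids(void)
    {
            struct chp_id chpid;

            chp_id_for_each(&chpid) {
                    /* chpid.cssid / chpid.id name each candidate path here */
            }
    }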
diff --git a/kernel/arch/s390/include/asm/cio.h b/kernel/arch/s390/include/asm/cio.h
new file mode 100644
index 000000000..096339207
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cio.h
@@ -0,0 +1,315 @@
+/*
+ * Common interface for I/O on S/390
+ */
+#ifndef _ASM_S390_CIO_H_
+#define _ASM_S390_CIO_H_
+
+#include <linux/spinlock.h>
+#include <asm/types.h>
+
+#define LPM_ANYPATH 0xff
+#define __MAX_CSSID 0
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
+
+#include <asm/scsw.h>
+
+/**
+ * struct ccw1 - channel command word
+ * @cmd_code: command code
+ * @flags: flags, like IDA addressing, etc.
+ * @count: byte count
+ * @cda: data address
+ *
+ * The ccw is the basic structure to build channel programs that perform
+ * operations with the device or the control unit. Only Format-1 channel
+ * command words are supported.
+ */
+struct ccw1 {
+ __u8 cmd_code;
+ __u8 flags;
+ __u16 count;
+ __u32 cda;
+} __attribute__ ((packed,aligned(8)));
+
+#define CCW_FLAG_DC 0x80
+#define CCW_FLAG_CC 0x40
+#define CCW_FLAG_SLI 0x20
+#define CCW_FLAG_SKIP 0x10
+#define CCW_FLAG_PCI 0x08
+#define CCW_FLAG_IDA 0x04
+#define CCW_FLAG_SUSPEND 0x02
+
+#define CCW_CMD_READ_IPL 0x02
+#define CCW_CMD_NOOP 0x03
+#define CCW_CMD_BASIC_SENSE 0x04
+#define CCW_CMD_TIC 0x08
+#define CCW_CMD_STLCK 0x14
+#define CCW_CMD_SENSE_PGID 0x34
+#define CCW_CMD_SUSPEND_RECONN 0x5B
+#define CCW_CMD_RDC 0x64
+#define CCW_CMD_RELEASE 0x94
+#define CCW_CMD_SET_PGID 0xAF
+#define CCW_CMD_SENSE_ID 0xE4
+#define CCW_CMD_DCTL 0xF3
+
+#define SENSE_MAX_COUNT 0x20
+
+/**
+ * struct erw - extended report word
+ * @res0: reserved
+ * @auth: authorization check
+ * @pvrf: path-verification-required flag
+ * @cpt: channel-path timeout
+ * @fsavf: failing storage address validity flag
+ * @cons: concurrent sense
+ * @scavf: secondary ccw address validity flag
+ * @fsaf: failing storage address format
+ * @scnt: sense count, if @cons == %1
+ * @res16: reserved
+ */
+struct erw {
+ __u32 res0 : 3;
+ __u32 auth : 1;
+ __u32 pvrf : 1;
+ __u32 cpt : 1;
+ __u32 fsavf : 1;
+ __u32 cons : 1;
+ __u32 scavf : 1;
+ __u32 fsaf : 1;
+ __u32 scnt : 6;
+ __u32 res16 : 16;
+} __attribute__ ((packed));
+
+/**
+ * struct erw_eadm - EADM Subchannel extended report word
+ * @b: aob error
+ * @r: arsb error
+ */
+struct erw_eadm {
+ __u32 : 16;
+ __u32 b : 1;
+ __u32 r : 1;
+ __u32 : 14;
+} __packed;
+
+/**
+ * struct sublog - subchannel logout area
+ * @res0: reserved
+ * @esf: extended status flags
+ * @lpum: last path used mask
+ * @arep: ancillary report
+ * @fvf: field-validity flags
+ * @sacc: storage access code
+ * @termc: termination code
+ * @devsc: device-status check
+ * @serr: secondary error
+ * @ioerr: i/o-error alert
+ * @seqc: sequence code
+ */
+struct sublog {
+ __u32 res0 : 1;
+ __u32 esf : 7;
+ __u32 lpum : 8;
+ __u32 arep : 1;
+ __u32 fvf : 5;
+ __u32 sacc : 2;
+ __u32 termc : 2;
+ __u32 devsc : 1;
+ __u32 serr : 1;
+ __u32 ioerr : 1;
+ __u32 seqc : 3;
+} __attribute__ ((packed));
+
+/**
+ * struct esw0 - Format 0 Extended Status Word (ESW)
+ * @sublog: subchannel logout
+ * @erw: extended report word
+ * @faddr: failing storage address
+ * @saddr: secondary ccw address
+ */
+struct esw0 {
+ struct sublog sublog;
+ struct erw erw;
+ __u32 faddr[2];
+ __u32 saddr;
+} __attribute__ ((packed));
+
+/**
+ * struct esw1 - Format 1 Extended Status Word (ESW)
+ * @zero0: reserved zeros
+ * @lpum: last path used mask
+ * @zero16: reserved zeros
+ * @erw: extended report word
+ * @zeros: three fullwords of zeros
+ */
+struct esw1 {
+ __u8 zero0;
+ __u8 lpum;
+ __u16 zero16;
+ struct erw erw;
+ __u32 zeros[3];
+} __attribute__ ((packed));
+
+/**
+ * struct esw2 - Format 2 Extended Status Word (ESW)
+ * @zero0: reserved zeros
+ * @lpum: last path used mask
+ * @dcti: device-connect-time interval
+ * @erw: extended report word
+ * @zeros: three fullwords of zeros
+ */
+struct esw2 {
+ __u8 zero0;
+ __u8 lpum;
+ __u16 dcti;
+ struct erw erw;
+ __u32 zeros[3];
+} __attribute__ ((packed));
+
+/**
+ * struct esw3 - Format 3 Extended Status Word (ESW)
+ * @zero0: reserved zeros
+ * @lpum: last path used mask
+ * @res: reserved
+ * @erw: extended report word
+ * @zeros: three fullwords of zeros
+ */
+struct esw3 {
+ __u8 zero0;
+ __u8 lpum;
+ __u16 res;
+ struct erw erw;
+ __u32 zeros[3];
+} __attribute__ ((packed));
+
+/**
+ * struct esw_eadm - EADM Subchannel Extended Status Word (ESW)
+ * @sublog: subchannel logout
+ * @erw: extended report word
+ */
+struct esw_eadm {
+ __u32 sublog;
+ struct erw_eadm erw;
+ __u32 : 32;
+ __u32 : 32;
+ __u32 : 32;
+} __packed;
+
+/**
+ * struct irb - interruption response block
+ * @scsw: subchannel status word
+ * @esw: extended status word
+ * @ecw: extended control word
+ *
+ * The irb that is handed to the device driver when an interrupt occurs. For
+ * solicited interrupts, the common I/O layer already performs checks whether
+ * a field is valid; a field not being valid is always passed as %0.
+ * If a unit check occurred, @ecw may contain sense data; this is retrieved
+ * by the common I/O layer itself if the device doesn't support concurrent
+ * sense (so that the device driver never needs to perform basic sense itself).
+ * For unsolicited interrupts, the irb is passed as-is (except for sense data,
+ * if applicable).
+ */
+struct irb {
+ union scsw scsw;
+ union {
+ struct esw0 esw0;
+ struct esw1 esw1;
+ struct esw2 esw2;
+ struct esw3 esw3;
+ struct esw_eadm eadm;
+ } esw;
+ __u8 ecw[32];
+} __attribute__ ((packed,aligned(4)));
+
+/**
+ * struct ciw - command information word (CIW) layout
+ * @et: entry type
+ * @reserved: reserved bits
+ * @ct: command type
+ * @cmd: command code
+ * @count: command count
+ */
+struct ciw {
+ __u32 et : 2;
+ __u32 reserved : 2;
+ __u32 ct : 4;
+ __u32 cmd : 8;
+ __u32 count : 16;
+} __attribute__ ((packed));
+
+#define CIW_TYPE_RCD 0x0 /* read configuration data */
+#define CIW_TYPE_SII 0x1 /* set interface identifier */
+#define CIW_TYPE_RNI 0x2 /* read node identifier */
+
+/*
+ * Flags used as input parameters for do_IO()
+ */
+#define DOIO_ALLOW_SUSPEND 0x0001 /* allow for channel prog. suspend */
+#define DOIO_DENY_PREFETCH 0x0002 /* don't allow for CCW prefetch */
+#define DOIO_SUPPRESS_INTER 0x0004 /* suppress intermediate inter. */
+ /* ... for suspended CCWs */
+/* Device or subchannel gone. */
+#define CIO_GONE 0x0001
+/* No path to device. */
+#define CIO_NO_PATH 0x0002
+/* Device has appeared. */
+#define CIO_OPER 0x0004
+/* Sick revalidation of device. */
+#define CIO_REVALIDATE 0x0008
+/* Device did not respond in time. */
+#define CIO_BOXED 0x0010
+
+/**
+ * struct ccw_dev_id - unique identifier for ccw devices
+ * @ssid: subchannel set id
+ * @devno: device number
+ *
+ * This structure is not directly based on any hardware structure. The
+ * hardware identifies a device by its device number and its subchannel,
+ * which is in turn identified by its id. In order to get a unique identifier
+ * for ccw devices across subchannel sets, struct ccw_dev_id has been
+ * introduced.
+ */
+struct ccw_dev_id {
+ u8 ssid;
+ u16 devno;
+};
+
+/**
+ * ccw_dev_id_is_equal() - compare two ccw_dev_ids
+ * @dev_id1: a ccw_dev_id
+ * @dev_id2: another ccw_dev_id
+ * Returns:
+ * %1 if the two structures are equal field-by-field,
+ * %0 if not.
+ * Context:
+ * any
+ */
+static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
+ struct ccw_dev_id *dev_id2)
+{
+ if ((dev_id1->ssid == dev_id2->ssid) &&
+ (dev_id1->devno == dev_id2->devno))
+ return 1;
+ return 0;
+}
+
+void channel_subsystem_reinit(void);
+extern void css_schedule_reprobe(void);
+
+extern void reipl_ccw_dev(struct ccw_dev_id *id);
+
+struct cio_iplinfo {
+ u16 devno;
+ int is_qdio;
+};
+
+extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
+
+/* Function from drivers/s390/cio/chsc.c */
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
+int chsc_sstpi(void *page, void *result, size_t size);
+
+#endif
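For illustration, a minimal sketch (not part of the patch) of how a driver might chain two CCWs with struct ccw1. The NOOP-plus-BASIC-SENSE program, the static buffers, and the use of __pa() for the 31-bit @cda field are assumptions of this example; real channel programs are built by the drivers under drivers/s390.

#include <asm/cio.h>	/* struct ccw1, CCW_* constants */
#include <asm/page.h>	/* __pa() */

static struct ccw1 chain[2];		/* aligned(8) via struct attribute */
static char sense_buf[SENSE_MAX_COUNT];

static void build_sense_program(void)
{
	chain[0].cmd_code = CCW_CMD_NOOP;
	chain[0].flags = CCW_FLAG_CC;		/* command-chain to CCW 1 */
	chain[0].count = 0;
	chain[0].cda = 0;

	chain[1].cmd_code = CCW_CMD_BASIC_SENSE;
	chain[1].flags = CCW_FLAG_SLI;		/* tolerate short sense data */
	chain[1].count = SENSE_MAX_COUNT;
	chain[1].cda = (__u32) __pa(sense_buf);	/* buffer assumed below 2 GB */
}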
diff --git a/kernel/arch/s390/include/asm/clp.h b/kernel/arch/s390/include/asm/clp.h
new file mode 100644
index 000000000..a0e71a501
--- /dev/null
+++ b/kernel/arch/s390/include/asm/clp.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_S390_CLP_H
+#define _ASM_S390_CLP_H
+
+/* CLP common request & response block size */
+#define CLP_BLK_SIZE PAGE_SIZE
+
+struct clp_req_hdr {
+ u16 len;
+ u16 cmd;
+} __packed;
+
+struct clp_rsp_hdr {
+ u16 len;
+ u16 rsp;
+} __packed;
+
+/* CLP Response Codes */
+#define CLP_RC_OK 0x0010 /* Command request successful */
+#define CLP_RC_CMD 0x0020 /* Command code not recognized */
+#define CLP_RC_PERM 0x0030 /* Command not authorized */
+#define CLP_RC_FMT 0x0040 /* Invalid command request format */
+#define CLP_RC_LEN 0x0050 /* Invalid command request length */
+#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
+#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
+#define CLP_RC_NODATA 0x0080 /* No data available */
+#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
+
+#endif
diff --git a/kernel/arch/s390/include/asm/cmb.h b/kernel/arch/s390/include/asm/cmb.h
new file mode 100644
index 000000000..806eac12e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cmb.h
@@ -0,0 +1,12 @@
+#ifndef S390_CMB_H
+#define S390_CMB_H
+
+#include <uapi/asm/cmb.h>
+
+struct ccw_device;
+extern int enable_cmf(struct ccw_device *cdev);
+extern int disable_cmf(struct ccw_device *cdev);
+extern u64 cmf_read(struct ccw_device *cdev, int index);
+extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data);
+
+#endif /* S390_CMB_H */
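A hedged sketch of the channel-measurement API above: enable the facility on a ccw device, read one snapshot, and disable it again. The field name ssch_rsch_count is taken from uapi/asm/cmb.h and should be treated as an assumption of the example.

#include <asm/cmb.h>
#include <linux/kernel.h>	/* pr_info() */

static void sample_cmf(struct ccw_device *cdev)
{
	struct cmbdata data;

	if (enable_cmf(cdev))
		return;				/* measurement unavailable */
	if (!cmf_readall(cdev, &data))
		pr_info("ssch/rsch count: %llu\n",
			(unsigned long long) data.ssch_rsch_count);
	disable_cmf(cdev);
}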
diff --git a/kernel/arch/s390/include/asm/cmpxchg.h b/kernel/arch/s390/include/asm/cmpxchg.h
new file mode 100644
index 000000000..4eadec466
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cmpxchg.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright IBM Corp. 1999, 2011
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ */
+
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/mmdebug.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __o = (o); \
+ __typeof__(*(ptr)) __n = (n); \
+ (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
+})
+
+#define cmpxchg64 cmpxchg
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old; \
+ do { \
+ __old = *__ptr; \
+ } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
+ __old; \
+})
+
+#define __HAVE_ARCH_CMPXCHG
+
+#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
+({ \
+ register __typeof__(*(p1)) __old1 asm("2") = (o1); \
+ register __typeof__(*(p2)) __old2 asm("3") = (o2); \
+ register __typeof__(*(p1)) __new1 asm("4") = (n1); \
+ register __typeof__(*(p2)) __new2 asm("5") = (n2); \
+ int cc; \
+ asm volatile( \
+ insn " %[old],%[new],%[ptr]\n" \
+ " ipm %[cc]\n" \
+ " srl %[cc],28" \
+ : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
+ : [new] "d" (__new1), "d" (__new2), \
+ [ptr] "Q" (*(p1)), "Q" (*(p2)) \
+ : "memory", "cc"); \
+ !cc; \
+})
+
+#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \
+ __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds")
+
+#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \
+ __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg")
+
+extern void __cmpxchg_double_called_with_bad_pointer(void);
+
+#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ switch (sizeof(*(p1))) { \
+ case 4: \
+ __ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \
+ break; \
+ case 8: \
+ __ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \
+ break; \
+ default: \
+ __cmpxchg_double_called_with_bad_pointer(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+({ \
+ __typeof__(p1) __p1 = (p1); \
+ __typeof__(p2) __p2 = (p2); \
+ BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
+ BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
+ VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
+ __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
+})
+
+#define system_has_cmpxchg_double() 1
+
+#endif /* __ASM_CMPXCHG_H */
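To illustrate the compare-and-swap loop these macros enable, here is a small, self-contained sketch of a lock-free saturating counter; it shows the idiom only and is not code from the patch.

#include <asm/cmpxchg.h>

static unsigned long counter;

static void counter_add_capped(unsigned long inc, unsigned long cap)
{
	unsigned long old, new;

	do {
		old = counter;		/* snapshot the current value */
		new = old + inc;
		if (new > cap)
			new = cap;	/* saturate at the cap */
	} while (cmpxchg(&counter, old, new) != old);	/* retry on race */
}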
diff --git a/kernel/arch/s390/include/asm/compat.h b/kernel/arch/s390/include/asm/compat.h
new file mode 100644
index 000000000..d350ed9d0
--- /dev/null
+++ b/kernel/arch/s390/include/asm/compat.h
@@ -0,0 +1,359 @@
+#ifndef _ASM_S390X_COMPAT_H
+#define _ASM_S390X_COMPAT_H
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+
+#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
+
+#define __SC_DELOUSE(t,v) ({ \
+ BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
+ (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
+})
+
+#define PSW32_MASK_PER 0x40000000UL
+#define PSW32_MASK_DAT 0x04000000UL
+#define PSW32_MASK_IO 0x02000000UL
+#define PSW32_MASK_EXT 0x01000000UL
+#define PSW32_MASK_KEY 0x00F00000UL
+#define PSW32_MASK_BASE 0x00080000UL /* Always one */
+#define PSW32_MASK_MCHECK 0x00040000UL
+#define PSW32_MASK_WAIT 0x00020000UL
+#define PSW32_MASK_PSTATE 0x00010000UL
+#define PSW32_MASK_ASC 0x0000C000UL
+#define PSW32_MASK_CC 0x00003000UL
+#define PSW32_MASK_PM 0x00000f00UL
+#define PSW32_MASK_RI 0x00000080UL
+
+#define PSW32_MASK_USER 0x0000FF00UL
+
+#define PSW32_ADDR_AMODE 0x80000000UL
+#define PSW32_ADDR_INSN 0x7FFFFFFFUL
+
+#define PSW32_DEFAULT_KEY (((u32) PAGE_DEFAULT_ACC) << 20)
+
+#define PSW32_ASC_PRIMARY 0x00000000UL
+#define PSW32_ASC_ACCREG 0x00004000UL
+#define PSW32_ASC_SECONDARY 0x00008000UL
+#define PSW32_ASC_HOME 0x0000C000UL
+
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
+ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+ PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
+ PSW32_ASC_PRIMARY)
+
+#define COMPAT_USER_HZ 100
+#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
+
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_time_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+typedef u16 compat_mode_t;
+typedef u32 compat_ino_t;
+typedef u16 compat_dev_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef u16 compat_nlink_t;
+typedef u16 compat_ipc_pid_t;
+typedef s32 compat_daddr_t;
+typedef u32 compat_caddr_t;
+typedef __kernel_fsid_t compat_fsid_t;
+typedef s32 compat_key_t;
+typedef s32 compat_timer_t;
+
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef s64 compat_s64;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u64 compat_u64;
+typedef u32 compat_uptr_t;
+
+typedef struct {
+ u32 mask;
+ u32 addr;
+} __aligned(8) psw_compat_t;
+
+typedef struct {
+ psw_compat_t psw;
+ u32 gprs[NUM_GPRS];
+ u32 acrs[NUM_ACRS];
+ u32 orig_gpr2;
+} s390_compat_regs;
+
+typedef struct {
+ u32 gprs_high[NUM_GPRS];
+} s390_compat_regs_high;
+
+struct compat_timespec {
+ compat_time_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct compat_timeval {
+ compat_time_t tv_sec;
+ s32 tv_usec;
+};
+
+struct compat_stat {
+ compat_dev_t st_dev;
+ u16 __pad1;
+ compat_ino_t st_ino;
+ compat_mode_t st_mode;
+ compat_nlink_t st_nlink;
+ __compat_uid_t st_uid;
+ __compat_gid_t st_gid;
+ compat_dev_t st_rdev;
+ u16 __pad2;
+ u32 st_size;
+ u32 st_blksize;
+ u32 st_blocks;
+ u32 st_atime;
+ u32 st_atime_nsec;
+ u32 st_mtime;
+ u32 st_mtime_nsec;
+ u32 st_ctime;
+ u32 st_ctime_nsec;
+ u32 __unused4;
+ u32 __unused5;
+};
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+ compat_pid_t l_pid;
+};
+
+#define F_GETLK64 12
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+};
+
+struct compat_statfs {
+ u32 f_type;
+ u32 f_bsize;
+ u32 f_blocks;
+ u32 f_bfree;
+ u32 f_bavail;
+ u32 f_files;
+ u32 f_ffree;
+ compat_fsid_t f_fsid;
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
+};
+
+struct compat_statfs64 {
+ u32 f_type;
+ u32 f_bsize;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
+ compat_fsid_t f_fsid;
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
+};
+
+#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
+#define COMPAT_RLIM_INFINITY 0xffffffff
+
+typedef u32 compat_old_sigset_t; /* at least 32 bits */
+
+#define _COMPAT_NSIG 64
+#define _COMPAT_NSIG_BPW 32
+
+typedef u32 compat_sigset_word;
+
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ __u32 _addr; /* faulting insn/memory ref. - pointer */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} compat_siginfo_t;
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid _sifields._kill._pid
+#define si_uid _sifields._kill._uid
+#define si_status _sifields._sigchld._status
+#define si_utime _sifields._sigchld._utime
+#define si_stime _sifields._sigchld._stime
+#define si_value _sifields._rt._sigval
+#define si_int _sifields._rt._sigval.sival_int
+#define si_ptr _sifields._rt._sigval.sival_ptr
+#define si_addr _sifields._sigfault._addr
+#define si_band _sifields._sigpoll._band
+#define si_fd _sifields._sigpoll._fd
+#define si_tid _sifields._timer._tid
+#define si_overrun _sifields._timer._overrun
+
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)(uptr & 0x7fffffffUL);
+}
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
+#ifdef CONFIG_COMPAT
+
+static inline int is_compat_task(void)
+{
+ return is_32bit_task();
+}
+
+static inline void __user *arch_compat_alloc_user_space(long len)
+{
+ unsigned long stack;
+
+ stack = KSTK_ESP(current);
+ if (is_compat_task())
+ stack &= 0x7fffffffUL;
+ return (void __user *) (stack - len);
+}
+
+#endif
+
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned int __unused1;
+ unsigned int __unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_time_t sem_otime;
+ compat_ulong_t __pad1;
+ compat_time_t sem_ctime;
+ compat_ulong_t __pad2;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused1;
+ compat_ulong_t __unused2;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_time_t msg_stime;
+ compat_ulong_t __pad1;
+ compat_time_t msg_rtime;
+ compat_ulong_t __pad2;
+ compat_time_t msg_ctime;
+ compat_ulong_t __pad3;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused1;
+ compat_ulong_t __unused2;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_time_t shm_atime;
+ compat_ulong_t __pad1;
+ compat_time_t shm_dtime;
+ compat_ulong_t __pad2;
+ compat_time_t shm_ctime;
+ compat_ulong_t __pad3;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused1;
+ compat_ulong_t __unused2;
+};
+#endif /* _ASM_S390X_COMPAT_H */
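A brief sketch of the pointer-widening helper: a hypothetical compat handler turning a 31-bit user pointer into a proper __user pointer before copying. The function name and error handling are illustrative only.

#include <linux/uaccess.h>	/* copy_from_user() */
#include <linux/errno.h>

static int fetch_compat_timeval(compat_uptr_t uptr,
				struct compat_timeval *tv)
{
	/* compat_ptr() masks the address to 31 bits, matching the
	 * s390 compat address-space layout. */
	if (copy_from_user(tv, compat_ptr(uptr), sizeof(*tv)))
		return -EFAULT;
	return 0;
}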
diff --git a/kernel/arch/s390/include/asm/cpcmd.h b/kernel/arch/s390/include/asm/cpcmd.h
new file mode 100644
index 000000000..3dfadb5d6
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cpcmd.h
@@ -0,0 +1,32 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Christian Borntraeger (cborntra@de.ibm.com),
+ */
+
+#ifndef _ASM_S390_CPCMD_H
+#define _ASM_S390_CPCMD_H
+
+/*
+ * The low-level function for cpcmd.
+ * The caller of __cpcmd has to ensure that the response buffer is below 2 GB.
+ */
+extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code);
+
+/*
+ * cpcmd is the in-kernel interface for issuing CP commands
+ *
+ * cmd: null-terminated command string, max 240 characters
+ * response: response buffer for VM's textual response
+ * rlen: size of the response buffer; cpcmd will not exceed this size
+ * but will truncate the output if it is too large. Everything that
+ * did not fit into the buffer will be silently dropped
+ * response_code: return pointer for VM's error code
+ * return value: the size of the response. The caller can check if the buffer
+ * was large enough by comparing the return value and rlen
+ * NOTE: If the response buffer is not below 2 GB, cpcmd can sleep
+ */
+extern int cpcmd(const char *cmd, char *response, int rlen, int *response_code);
+
+#endif /* _ASM_S390_CPCMD_H */
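A sketch of cpcmd() in use under z/VM, illustrating the truncation contract described above; the command string and buffer size are placeholders.

#include <asm/cpcmd.h>
#include <linux/kernel.h>

static void query_vm_userid(void)
{
	char response[128];
	int rc, len;

	len = cpcmd("QUERY USERID", response, sizeof(response), &rc);
	if (len > (int) sizeof(response))
		pr_warn("CP response truncated (%d bytes)\n", len);
	else if (rc == 0)
		pr_info("CP replied: %.*s\n", len, response);
}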
diff --git a/kernel/arch/s390/include/asm/cpu.h b/kernel/arch/s390/include/asm/cpu.h
new file mode 100644
index 000000000..f5a8e2fcd
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cpu.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Hartmut Penner <hp@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Christian Ehrhardt <ehrhardt@de.ibm.com>,
+ */
+
+#ifndef _ASM_S390_CPU_H
+#define _ASM_S390_CPU_H
+
+#define MAX_CPU_ADDRESS 255
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+struct cpuid
+{
+ unsigned int version : 8;
+ unsigned int ident : 24;
+ unsigned int machine : 16;
+ unsigned int unused : 16;
+} __attribute__ ((packed, aligned(8)));
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_S390_CPU_H */
diff --git a/kernel/arch/s390/include/asm/cpu_mf.h b/kernel/arch/s390/include/asm/cpu_mf.h
new file mode 100644
index 000000000..5243a8679
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cpu_mf.h
@@ -0,0 +1,297 @@
+/*
+ * CPU-measurement facilities
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#ifndef _ASM_S390_CPU_MF_H
+#define _ASM_S390_CPU_MF_H
+
+#include <linux/errno.h>
+#include <asm/facility.h>
+
+#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
+#define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */
+#define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */
+#define CPU_MF_INT_SF_SACA (1 << 23) /* sampler auth. change alert */
+#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */
+#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */
+#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */
+#define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */
+#define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program
+ buffer full */
+
+#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA)
+#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \
+ CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \
+ CPU_MF_INT_SF_LSDA)
+#define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL)
+
+/* CPU measurement facility support */
+static inline int cpum_cf_avail(void)
+{
+ return MACHINE_HAS_LPP && test_facility(67);
+}
+
+static inline int cpum_sf_avail(void)
+{
+ return MACHINE_HAS_LPP && test_facility(68);
+}
+
+struct cpumf_ctr_info {
+ u16 cfvn;
+ u16 auth_ctl;
+ u16 enable_ctl;
+ u16 act_ctl;
+ u16 max_cpu;
+ u16 csvn;
+ u16 max_cg;
+ u16 reserved1;
+ u32 reserved2[12];
+} __packed;
+
+/* QUERY SAMPLING INFORMATION block */
+struct hws_qsi_info_block { /* Bit(s) */
+ unsigned int b0_13:14; /* 0-13: zeros */
+ unsigned int as:1; /* 14: basic-sampling authorization */
+ unsigned int ad:1; /* 15: diag-sampling authorization */
+ unsigned int b16_21:6; /* 16-21: zeros */
+ unsigned int es:1; /* 22: basic-sampling enable control */
+ unsigned int ed:1; /* 23: diag-sampling enable control */
+ unsigned int b24_29:6; /* 24-29: zeros */
+ unsigned int cs:1; /* 30: basic-sampling activation control */
+ unsigned int cd:1; /* 31: diag-sampling activation control */
+ unsigned int bsdes:16; /* 4-5: size of basic sampling entry */
+ unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */
+ unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
+ unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
+ unsigned long tear; /* 24-31: TEAR contents */
+ unsigned long dear; /* 32-39: DEAR contents */
+ unsigned int rsvrd0; /* 40-43: reserved */
+ unsigned int cpu_speed; /* 44-47: CPU speed */
+ unsigned long long rsvrd1; /* 48-55: reserved */
+ unsigned long long rsvrd2; /* 56-63: reserved */
+} __packed;
+
+/* SET SAMPLING CONTROLS request block */
+struct hws_lsctl_request_block {
+ unsigned int s:1; /* 0: maximum buffer indicator */
+ unsigned int h:1; /* 1: part. level reserved for VM use*/
+ unsigned long long b2_53:52; /* 2-53: zeros */
+ unsigned int es:1; /* 54: basic-sampling enable control */
+ unsigned int ed:1; /* 55: diag-sampling enable control */
+ unsigned int b56_61:6; /* 56-61: zeros */
+ unsigned int cs:1; /* 62: basic-sampling activation control */
+ unsigned int cd:1; /* 63: diag-sampling activation control */
+ unsigned long interval; /* 8-15: sampling interval */
+ unsigned long tear; /* 16-23: TEAR contents */
+ unsigned long dear; /* 24-31: DEAR contents */
+ /* 32-63: */
+ unsigned long rsvrd1; /* reserved */
+ unsigned long rsvrd2; /* reserved */
+ unsigned long rsvrd3; /* reserved */
+ unsigned long rsvrd4; /* reserved */
+} __packed;
+
+struct hws_basic_entry {
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int R:4; /* 16-19 reserved */
+ unsigned int U:4; /* 20-23 Number of unique instruct. */
+ unsigned int z:2; /* zeros */
+ unsigned int T:1; /* 26 PSW DAT mode */
+ unsigned int W:1; /* 27 PSW wait state */
+ unsigned int P:1; /* 28 PSW Problem state */
+ unsigned int AS:2; /* 29-30 PSW address-space control */
+ unsigned int I:1; /* 31 entry valid or invalid */
+ unsigned int:16;
+ unsigned int prim_asn:16; /* primary ASN */
+ unsigned long long ia; /* Instruction Address */
+ unsigned long long gpp; /* Guest Program Parameter */
+ unsigned long long hpp; /* Host Program Parameter */
+} __packed;
+
+struct hws_diag_entry {
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int R:14; /* 16-19 and 20-30 reserved */
+ unsigned int I:1; /* 31 entry valid or invalid */
+ u8 data[]; /* Machine-dependent sample data */
+} __packed;
+
+struct hws_combined_entry {
+ struct hws_basic_entry basic; /* Basic-sampling data entry */
+ struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
+} __packed;
+
+struct hws_trailer_entry {
+ union {
+ struct {
+ unsigned int f:1; /* 0 - Block Full Indicator */
+ unsigned int a:1; /* 1 - Alert request control */
+ unsigned int t:1; /* 2 - Timestamp format */
+ unsigned long long:61; /* 3 - 63: Reserved */
+ };
+ unsigned long long flags; /* 0 - 63: All indicators */
+ };
+ unsigned long long overflow; /* 64 - sample Overflow count */
+ unsigned char timestamp[16]; /* 16 - 31 timestamp */
+ unsigned long long reserved1; /* 32 - reserved */
+ unsigned long long reserved2; /* */
+ unsigned long long progusage1; /* 48 - reserved for programming use */
+ unsigned long long progusage2; /* */
+} __packed;
+
+/* Query counter information */
+static inline int qctri(struct cpumf_ctr_info *info)
+{
+ int rc = -EINVAL;
+
+ asm volatile (
+ "0: .insn s,0xb28e0000,%1\n"
+ "1: lhi %0,0\n"
+ "2:\n"
+ EX_TABLE(1b, 2b)
+ : "+d" (rc), "=Q" (*info));
+ return rc;
+}
+
+/* Load CPU-counter-set controls */
+static inline int lcctl(u64 ctl)
+{
+ int cc;
+
+ asm volatile (
+ " .insn s,0xb2840000,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc) : "m" (ctl) : "cc");
+ return cc;
+}
+
+/* Extract CPU counter */
+static inline int ecctr(u64 ctr, u64 *val)
+{
+ register u64 content asm("4") = 0;
+ int cc;
+
+ asm volatile (
+ " .insn rre,0xb2e40000,%0,%2\n"
+ " ipm %1\n"
+ " srl %1,28\n"
+ : "=d" (content), "=d" (cc) : "d" (ctr) : "cc");
+ if (!cc)
+ *val = content;
+ return cc;
+}
+
+/* Store CPU counter multiple for the MT utilization counter set */
+static inline int stcctm5(u64 num, u64 *val)
+{
+ typedef struct { u64 _[num]; } addrtype;
+ int cc;
+
+ asm volatile (
+ " .insn rsy,0xeb0000000017,%2,5,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
+ return cc;
+}
+
+/* Query sampling information */
+static inline int qsi(struct hws_qsi_info_block *info)
+{
+ int cc;
+ cc = 1;
+
+ asm volatile(
+ "0: .insn s,0xb2860000,0(%1)\n"
+ "1: lhi %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (cc), "+a" (info)
+ : "m" (*info)
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0;
+}
+
+/* Load sampling controls */
+static inline int lsctl(struct hws_lsctl_request_block *req)
+{
+ int cc;
+
+ cc = 1;
+ asm volatile(
+ "0: .insn s,0xb2870000,0(%1)\n"
+ "1: ipm %0\n"
+ " srl %0,28\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (cc), "+a" (req)
+ : "m" (*req)
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0;
+}
+
+/* Sampling control helper functions */
+
+#include <linux/time.h>
+
+static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
+ unsigned long freq)
+{
+ return (USEC_PER_SEC / freq) * qsi->cpu_speed;
+}
+
+static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
+ unsigned long rate)
+{
+ return USEC_PER_SEC * qsi->cpu_speed / rate;
+}
+
+#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL
+#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
+
+/* Return TOD timestamp contained in a trailer entry */
+static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
+{
+ /* TOD in STCKE format */
+ if (te->t)
+ return *((unsigned long long *) &te->timestamp[1]);
+
+ /* TOD in STCK format */
+ return *((unsigned long long *) &te->timestamp[0]);
+}
+
+/* Return pointer to trailer entry of a sample data block */
+static inline unsigned long *trailer_entry_ptr(unsigned long v)
+{
+ void *ret;
+
+ ret = (void *) v;
+ ret += PAGE_SIZE;
+ ret -= sizeof(struct hws_trailer_entry);
+
+ return (unsigned long *) ret;
+}
+
+/* Return non-zero if the entry in the sample data block table (sdbt)
+ * is a link to the next sdbt */
+static inline int is_link_entry(unsigned long *s)
+{
+ return *s & 0x1ul ? 1 : 0;
+}
+
+/* Return pointer to the linked sdbt */
+static inline unsigned long *get_next_sdbt(unsigned long *s)
+{
+ return (unsigned long *) (*s & ~0x1ul);
+}
+#endif /* _ASM_S390_CPU_MF_H */
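An illustrative walk over a sample-data-block table (SDBT) using the helpers above. The ring assumption, that the last link entry points back to the first table, matches the usual sampling buffer layout but is an assumption of this sketch.

#include <asm/cpu_mf.h>

static unsigned int count_sdb_entries(unsigned long *start)
{
	unsigned long *entry = start;
	unsigned int n = 0;

	do {
		if (is_link_entry(entry))
			entry = get_next_sdbt(entry);	/* follow to next table */
		else {
			n++;		/* a sample-data-block address */
			entry++;
		}
	} while (entry != start);	/* stop when the ring closes */
	return n;
}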
diff --git a/kernel/arch/s390/include/asm/cputime.h b/kernel/arch/s390/include/asm/cputime.h
new file mode 100644
index 000000000..221b454c7
--- /dev/null
+++ b/kernel/arch/s390/include/asm/cputime.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright IBM Corp. 2004
+ *
+ * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _S390_CPUTIME_H
+#define _S390_CPUTIME_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+
+#define CPUTIME_PER_USEC 4096ULL
+#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
+
+/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
+
+typedef unsigned long long __nocast cputime_t;
+typedef unsigned long long __nocast cputime64_t;
+
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
+static inline unsigned long __div(unsigned long long n, unsigned long base)
+{
+ return n / base;
+}
+
+#define cputime_one_jiffy jiffies_to_cputime(1)
+
+/*
+ * Convert cputime to jiffies and back.
+ */
+static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
+{
+ return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned int jif)
+{
+ return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
+}
+
+static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
+{
+ unsigned long long jif = (__force unsigned long long) cputime;
+ do_div(jif, CPUTIME_PER_SEC / HZ);
+ return jif;
+}
+
+static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
+{
+ return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
+}
+
+/*
+ * Convert cputime to microseconds and back.
+ */
+static inline unsigned int cputime_to_usecs(const cputime_t cputime)
+{
+ return (__force unsigned long long) cputime >> 12;
+}
+
+static inline cputime_t usecs_to_cputime(const unsigned int m)
+{
+ return (__force cputime_t)(m * CPUTIME_PER_USEC);
+}
+
+#define usecs_to_cputime64(m) usecs_to_cputime(m)
+
+/*
+ * Convert cputime to seconds and back.
+ */
+static inline unsigned int cputime_to_secs(const cputime_t cputime)
+{
+ return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
+}
+
+static inline cputime_t secs_to_cputime(const unsigned int s)
+{
+ return (__force cputime_t)(s * CPUTIME_PER_SEC);
+}
+
+/*
+ * Convert cputime to timespec and back.
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *value)
+{
+ unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+ return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
+}
+
+static inline void cputime_to_timespec(const cputime_t cputime,
+ struct timespec *value)
+{
+ unsigned long long __cputime = (__force unsigned long long) cputime;
+ value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
+ value->tv_sec = __cputime / CPUTIME_PER_SEC;
+}
+
+/*
+ * Convert cputime to timeval and back.
+ * Since cputime and timeval have the same resolution (microseconds)
+ * this is easy.
+ */
+static inline cputime_t timeval_to_cputime(const struct timeval *value)
+{
+ unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+ return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
+}
+
+static inline void cputime_to_timeval(const cputime_t cputime,
+ struct timeval *value)
+{
+ unsigned long long __cputime = (__force unsigned long long) cputime;
+ value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
+ value->tv_sec = __cputime / CPUTIME_PER_SEC;
+}
+
+/*
+ * Convert cputime to clock and back.
+ */
+static inline clock_t cputime_to_clock_t(cputime_t cputime)
+{
+ unsigned long long clock = (__force unsigned long long) cputime;
+ do_div(clock, CPUTIME_PER_SEC / USER_HZ);
+ return clock;
+}
+
+static inline cputime_t clock_t_to_cputime(unsigned long x)
+{
+ return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
+}
+
+/*
+ * Convert cputime64 to clock.
+ */
+static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
+{
+ unsigned long long clock = (__force unsigned long long) cputime;
+ do_div(clock, CPUTIME_PER_SEC / USER_HZ);
+ return clock;
+}
+
+cputime64_t arch_cpu_idle_time(int cpu);
+
+#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
+
+#endif /* _S390_CPUTIME_H */
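A quick sanity sketch of the arithmetic above: with CPUTIME_PER_USEC == 4096, converting one second to cputime and back must round-trip exactly, since cputime_to_usecs() is a plain right shift by 12. The check function is illustrative only.

#include <asm/cputime.h>
#include <linux/time.h>		/* USEC_PER_SEC */
#include <linux/bug.h>

static void cputime_roundtrip_check(void)
{
	cputime_t one_sec = usecs_to_cputime(USEC_PER_SEC);

	/* 1 s == 1000000 us == 1000000 * 4096 cputime units */
	BUG_ON(cputime_to_usecs(one_sec) != USEC_PER_SEC);
}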
diff --git a/kernel/arch/s390/include/asm/crw.h b/kernel/arch/s390/include/asm/crw.h
new file mode 100644
index 000000000..7c31d3e25
--- /dev/null
+++ b/kernel/arch/s390/include/asm/crw.h
@@ -0,0 +1,69 @@
+/*
+ * Data definitions for channel report processing
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Cornelia Huck <cornelia.huck@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#ifndef _ASM_S390_CRW_H
+#define _ASM_S390_CRW_H
+
+#include <linux/types.h>
+
+/*
+ * Channel Report Word
+ */
+struct crw {
+ __u32 res1 : 1; /* reserved zero */
+ __u32 slct : 1; /* solicited */
+ __u32 oflw : 1; /* overflow */
+ __u32 chn : 1; /* chained */
+ __u32 rsc : 4; /* reporting source code */
+ __u32 anc : 1; /* ancillary report */
+ __u32 res2 : 1; /* reserved zero */
+ __u32 erc : 6; /* error-recovery code */
+ __u32 rsid : 16; /* reporting-source ID */
+} __attribute__ ((packed));
+
+typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
+
+extern int crw_register_handler(int rsc, crw_handler_t handler);
+extern void crw_unregister_handler(int rsc);
+extern void crw_handle_channel_report(void);
+void crw_wait_for_channel_report(void);
+
+#define NR_RSCS 16
+
+#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
+#define CRW_RSC_SCH 0x3 /* subchannel */
+#define CRW_RSC_CPATH 0x4 /* channel path */
+#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */
+#define CRW_RSC_CSS 0xB /* channel subsystem */
+
+#define CRW_ERC_EVENT 0x00 /* event information pending */
+#define CRW_ERC_AVAIL 0x01 /* available */
+#define CRW_ERC_INIT 0x02 /* initialized */
+#define CRW_ERC_TERROR 0x03 /* temporary error */
+#define CRW_ERC_IPARM 0x04 /* installed parm initialized */
+#define CRW_ERC_TERM 0x05 /* terminal */
+#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */
+#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */
+#define CRW_ERC_PMOD 0x08 /* installed parameters modified */
+
+static inline int stcrw(struct crw *pcrw)
+{
+ int ccode;
+
+ asm volatile(
+ " stcrw 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode), "=m" (*pcrw)
+ : "a" (pcrw)
+ : "cc" );
+ return ccode;
+}
+
+#endif /* _ASM_S390_CRW_H */
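A sketch of a channel-report consumer registering a handler for subchannel CRWs. Interpreting the third handler argument as an overflow indicator follows the cio CRW code, but is an assumption here; real consumers live in drivers/s390/cio.

#include <asm/crw.h>
#include <linux/init.h>
#include <linux/kernel.h>

static void my_sch_crw_handler(struct crw *crw0, struct crw *crw1,
			       int overflow)
{
	if (overflow)
		pr_warn("CRW overflow, channel reports may be lost\n");
	else
		pr_info("subchannel CRW: rsid 0x%x erc 0x%x\n",
			crw0->rsid, crw0->erc);
}

static int __init my_crw_init(void)
{
	return crw_register_handler(CRW_RSC_SCH, my_sch_crw_handler);
}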
diff --git a/kernel/arch/s390/include/asm/css_chars.h b/kernel/arch/s390/include/asm/css_chars.h
new file mode 100644
index 000000000..09d1dd46b
--- /dev/null
+++ b/kernel/arch/s390/include/asm/css_chars.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_CSS_CHARS_H
+#define _ASM_CSS_CHARS_H
+
+#include <linux/types.h>
+
+struct css_general_char {
+ u64 : 12;
+ u32 dynio : 1; /* bit 12 */
+ u32 : 4;
+ u32 eadm : 1; /* bit 17 */
+ u32 : 23;
+ u32 aif : 1; /* bit 41 */
+ u32 : 3;
+ u32 mcss : 1; /* bit 45 */
+ u32 fcs : 1; /* bit 46 */
+ u32 : 1;
+ u32 ext_mb : 1; /* bit 48 */
+ u32 : 7;
+ u32 aif_tdd : 1; /* bit 56 */
+ u32 : 1;
+ u32 qebsm : 1; /* bit 58 */
+ u32 : 8;
+ u32 aif_osa : 1; /* bit 67 */
+ u32 : 12;
+ u32 eadm_rf : 1; /* bit 80 */
+ u32 : 1;
+ u32 cib : 1; /* bit 82 */
+ u32 : 5;
+ u32 fcx : 1; /* bit 88 */
+ u32 : 19;
+ u32 alt_ssi : 1; /* bit 108 */
+ u32 : 1;
+ u32 narf : 1; /* bit 110 */
+} __packed;
+
+extern struct css_general_char css_general_characteristics;
+
+#endif
diff --git a/kernel/arch/s390/include/asm/ctl_reg.h b/kernel/arch/s390/include/asm/ctl_reg.h
new file mode 100644
index 000000000..cfad7fca0
--- /dev/null
+++ b/kernel/arch/s390/include/asm/ctl_reg.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_CTL_REG_H
+#define __ASM_CTL_REG_H
+
+#include <linux/bug.h>
+
+#define __ctl_load(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ " lctlg %1,%2,%0\n" \
+ : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) { \
+ typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ asm volatile( \
+ " stctg %1,%2,%0\n" \
+ : "=Q" (*(addrtype *)(&array)) \
+ : "i" (low), "i" (high)); \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg |= 1UL << bit;
+ __ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, cr, cr);
+ reg &= ~(1UL << bit);
+ __ctl_load(reg, cr, cr);
+}
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
+
+union ctlreg0 {
+ unsigned long val;
+ struct {
+ unsigned long : 32;
+ unsigned long : 3;
+ unsigned long lap : 1; /* Low-address-protection control */
+ unsigned long : 4;
+ unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+ unsigned long : 23;
+ };
+};
+
+#ifdef CONFIG_SMP
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
+#else
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
+
+#endif /* __ASM_CTL_REG_H */
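A short sketch of the bit helpers: under SMP, ctl_set_bit()/ctl_clear_bit() broadcast the change to every CPU, while the __ctl_* variants only touch the local CPU. The bit number below is a placeholder; which CR0 bits are architecturally meaningful is outside this header.

#include <asm/ctl_reg.h>

static void enable_cr0_feature(unsigned int bit)
{
	ctl_set_bit(0, bit);	/* set the bit in CR0 on all CPUs */
}

static void disable_cr0_feature(unsigned int bit)
{
	ctl_clear_bit(0, bit);	/* clear it again everywhere */
}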
diff --git a/kernel/arch/s390/include/asm/current.h b/kernel/arch/s390/include/asm/current.h
new file mode 100644
index 000000000..b80941f30
--- /dev/null
+++ b/kernel/arch/s390/include/asm/current.h
@@ -0,0 +1,18 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/current.h"
+ */
+
+#ifndef _S390_CURRENT_H
+#define _S390_CURRENT_H
+
+#include <asm/lowcore.h>
+
+struct task_struct;
+
+#define current ((struct task_struct *const)S390_lowcore.current_task)
+
+#endif /* !(_S390_CURRENT_H) */
diff --git a/kernel/arch/s390/include/asm/debug.h b/kernel/arch/s390/include/asm/debug.h
new file mode 100644
index 000000000..0206c8052
--- /dev/null
+++ b/kernel/arch/s390/include/asm/debug.h
@@ -0,0 +1,262 @@
+/*
+ * S/390 debug facility
+ *
+ * Copyright IBM Corp. 1999, 2000
+ */
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <uapi/asm/debug.h>
+
+#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
+#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
+#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
+#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
+#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
+#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
+
+#define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */
+
+#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
+ /* the entry information */
+
+typedef struct __debug_entry debug_entry_t;
+
+struct debug_view;
+
+typedef struct debug_info {
+ struct debug_info* next;
+ struct debug_info* prev;
+ atomic_t ref_count;
+ spinlock_t lock;
+ int level;
+ int nr_areas;
+ int pages_per_area;
+ int buf_size;
+ int entry_size;
+ debug_entry_t*** areas;
+ int active_area;
+ int *active_pages;
+ int *active_entries;
+ struct dentry* debugfs_root_entry;
+ struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
+ struct debug_view* views[DEBUG_MAX_VIEWS];
+ char name[DEBUG_MAX_NAME_LEN];
+ umode_t mode;
+} debug_info_t;
+
+typedef int (debug_header_proc_t) (debug_info_t* id,
+ struct debug_view* view,
+ int area,
+ debug_entry_t* entry,
+ char* out_buf);
+
+typedef int (debug_format_proc_t) (debug_info_t* id,
+ struct debug_view* view, char* out_buf,
+ const char* in_buf);
+typedef int (debug_prolog_proc_t) (debug_info_t* id,
+ struct debug_view* view,
+ char* out_buf);
+typedef int (debug_input_proc_t) (debug_info_t* id,
+ struct debug_view* view,
+ struct file* file,
+ const char __user *user_buf,
+ size_t in_buf_size, loff_t* offset);
+
+int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view,
+ int area, debug_entry_t* entry, char* out_buf);
+
+struct debug_view {
+ char name[DEBUG_MAX_NAME_LEN];
+ debug_prolog_proc_t* prolog_proc;
+ debug_header_proc_t* header_proc;
+ debug_format_proc_t* format_proc;
+ debug_input_proc_t* input_proc;
+ void* private_data;
+};
+
+extern struct debug_view debug_hex_ascii_view;
+extern struct debug_view debug_raw_view;
+extern struct debug_view debug_sprintf_view;
+
+/* do NOT use the _common functions */
+
+debug_entry_t* debug_event_common(debug_info_t* id, int level,
+ const void* data, int length);
+
+debug_entry_t* debug_exception_common(debug_info_t* id, int level,
+ const void* data, int length);
+
+/* Debug Feature API: */
+
+debug_info_t *debug_register(const char *name, int pages, int nr_areas,
+ int buf_size);
+
+debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
+ int buf_size, umode_t mode, uid_t uid,
+ gid_t gid);
+
+void debug_unregister(debug_info_t* id);
+
+void debug_set_level(debug_info_t* id, int new_level);
+
+void debug_set_critical(void);
+void debug_stop_all(void);
+
+static inline bool debug_level_enabled(debug_info_t* id, int level)
+{
+ return level <= id->level;
+}
+
+static inline debug_entry_t*
+debug_event(debug_info_t* id, int level, void* data, int length)
+{
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_event_common(id,level,data,length);
+}
+
+static inline debug_entry_t*
+debug_int_event(debug_info_t* id, int level, unsigned int tag)
+{
+ unsigned int t=tag;
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_event_common(id,level,&t,sizeof(unsigned int));
+}
+
+static inline debug_entry_t *
+debug_long_event (debug_info_t* id, int level, unsigned long tag)
+{
+ unsigned long t=tag;
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_event_common(id,level,&t,sizeof(unsigned long));
+}
+
+static inline debug_entry_t*
+debug_text_event(debug_info_t* id, int level, const char* txt)
+{
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_event_common(id,level,txt,strlen(txt));
+}
+
+/*
+ * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
+ * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
+ */
+extern debug_entry_t *
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
+ __attribute__ ((format(printf, 3, 4)));
+
+#define debug_sprintf_event(_id, _level, _fmt, ...) \
+({ \
+ debug_entry_t *__ret; \
+ debug_info_t *__id = _id; \
+ int __level = _level; \
+ if ((!__id) || (__level > __id->level)) \
+ __ret = NULL; \
+ else \
+ __ret = __debug_sprintf_event(__id, __level, \
+ _fmt, ## __VA_ARGS__); \
+ __ret; \
+})
+
+static inline debug_entry_t*
+debug_exception(debug_info_t* id, int level, void* data, int length)
+{
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_exception_common(id,level,data,length);
+}
+
+static inline debug_entry_t*
+debug_int_exception(debug_info_t* id, int level, unsigned int tag)
+{
+ unsigned int t=tag;
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_exception_common(id,level,&t,sizeof(unsigned int));
+}
+
+static inline debug_entry_t *
+debug_long_exception (debug_info_t* id, int level, unsigned long tag)
+{
+ unsigned long t=tag;
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_exception_common(id,level,&t,sizeof(unsigned long));
+}
+
+static inline debug_entry_t*
+debug_text_exception(debug_info_t* id, int level, const char* txt)
+{
+ if ((!id) || (level > id->level) || (id->pages_per_area == 0))
+ return NULL;
+ return debug_exception_common(id,level,txt,strlen(txt));
+}
+
+/*
+ * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
+ * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
+ */
+extern debug_entry_t *
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
+ __attribute__ ((format(printf, 3, 4)));
+
+#define debug_sprintf_exception(_id, _level, _fmt, ...) \
+({ \
+ debug_entry_t *__ret; \
+ debug_info_t *__id = _id; \
+ int __level = _level; \
+ if ((!__id) || (__level > __id->level)) \
+ __ret = NULL; \
+ else \
+ __ret = __debug_sprintf_exception(__id, __level, \
+ _fmt, ## __VA_ARGS__);\
+ __ret; \
+})
+
+int debug_register_view(debug_info_t* id, struct debug_view* view);
+int debug_unregister_view(debug_info_t* id, struct debug_view* view);
+
+/*
+ define the debug levels:
+ - 0 No debugging output to console or syslog
+ - 1 Log internal errors to syslog, ignore check conditions
+ - 2 Log internal errors and check conditions to syslog
+ - 3 Log internal errors to console, log check conditions to syslog
+ - 4 Log internal errors and check conditions to console
+ - 5 panic on internal errors, log check conditions to console
+ - 6 panic on both, internal errors and check conditions
+ */
+
+#ifndef DEBUG_LEVEL
+#define DEBUG_LEVEL 4
+#endif
+
+#define INTERNAL_ERRMSG(x,y...) "E" __FILE__ "%d: " x, __LINE__, y
+#define INTERNAL_WRNMSG(x,y...) "W" __FILE__ "%d: " x, __LINE__, y
+#define INTERNAL_INFMSG(x,y...) "I" __FILE__ "%d: " x, __LINE__, y
+#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
+
+#if DEBUG_LEVEL > 0
+#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
+#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
+#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
+#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
+#else
+#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#endif /* DEBUG_LEVEL > 0 */
+
+#endif /* DEBUG_H */
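Typical s390dbf usage, following Documentation/s390/s390dbf.txt: register a debug log, attach the sprintf view, and write an event. Names and sizes below are illustrative.

#include <asm/debug.h>
#include <linux/init.h>
#include <linux/errno.h>

static debug_info_t *my_dbf;

static int __init my_dbf_init(void)
{
	/* 2 pages per area, 1 area, room for a format string pointer
	 * plus a handful of arguments per entry */
	my_dbf = debug_register("mydriver", 2, 1, 8 * sizeof(long));
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_sprintf_view);
	debug_set_level(my_dbf, DEBUG_DEFAULT_LEVEL);
	debug_sprintf_event(my_dbf, 3, "probe rc=%d\n", 0);
	return 0;
}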
diff --git a/kernel/arch/s390/include/asm/delay.h b/kernel/arch/s390/include/asm/delay.h
new file mode 100644
index 000000000..3f6e4095f
--- /dev/null
+++ b/kernel/arch/s390/include/asm/delay.h
@@ -0,0 +1,24 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/delay.h"
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/s390/lib/delay.c
+ */
+
+#ifndef _S390_DELAY_H
+#define _S390_DELAY_H
+
+void __ndelay(unsigned long long nsecs);
+void __udelay(unsigned long long usecs);
+void udelay_simple(unsigned long long usecs);
+void __delay(unsigned long loops);
+
+#define ndelay(n) __ndelay((unsigned long long) (n))
+#define udelay(n) __udelay((unsigned long long) (n))
+#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
+
+#endif /* defined(_S390_DELAY_H) */
diff --git a/kernel/arch/s390/include/asm/device.h b/kernel/arch/s390/include/asm/device.h
new file mode 100644
index 000000000..d8f9872b0
--- /dev/null
+++ b/kernel/arch/s390/include/asm/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/kernel/arch/s390/include/asm/diag.h b/kernel/arch/s390/include/asm/diag.h
new file mode 100644
index 000000000..7e91c5807
--- /dev/null
+++ b/kernel/arch/s390/include/asm/diag.h
@@ -0,0 +1,52 @@
+/*
+ * s390 diagnose functions
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_DIAG_H
+#define _ASM_S390_DIAG_H
+
+/*
+ * Diagnose 10: Release page range
+ */
+static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
+{
+ unsigned long start_addr, end_addr;
+
+ start_addr = start_pfn << PAGE_SHIFT;
+ end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+
+ asm volatile(
+ "0: diag %0,%1,0x10\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ EX_TABLE(1b, 1b)
+ : : "a" (start_addr), "a" (end_addr));
+}
+
+/*
+ * Diagnose 14: Input spool file manipulation
+ */
+extern int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode);
+
+/*
+ * Diagnose 210: Get information about a virtual device
+ */
+struct diag210 {
+ u16 vrdcdvno; /* device number (input) */
+ u16 vrdclen; /* data block length (input) */
+ u8 vrdcvcla; /* virtual device class (output) */
+ u8 vrdcvtyp; /* virtual device type (output) */
+ u8 vrdcvsta; /* virtual device status (output) */
+ u8 vrdcvfla; /* virtual device flags (output) */
+ u8 vrdcrccl; /* real device class (output) */
+ u8 vrdccrty; /* real device type (output) */
+ u8 vrdccrmd; /* real device model (output) */
+ u8 vrdccrft; /* real device feature (output) */
+} __attribute__((packed, aligned(4)));
+
+extern int diag210(struct diag210 *addr);
+
+#endif /* _ASM_S390_DIAG_H */
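A sketch of querying a virtual device with diagnose 210. The device number is a placeholder, and diag210() is assumed to return non-zero on failure.

#include <asm/diag.h>
#include <linux/kernel.h>

static void show_vdev_class(u16 devno)
{
	struct diag210 data = {
		.vrdcdvno = devno,		/* device to query */
		.vrdclen = sizeof(data),	/* data block length */
	};

	if (!diag210(&data))
		pr_info("device %04x: class 0x%02x type 0x%02x\n",
			devno, data.vrdcvcla, data.vrdcvtyp);
}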
diff --git a/kernel/arch/s390/include/asm/dis.h b/kernel/arch/s390/include/asm/dis.h
new file mode 100644
index 000000000..60323c219
--- /dev/null
+++ b/kernel/arch/s390/include/asm/dis.h
@@ -0,0 +1,53 @@
+/*
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef __ASM_S390_DIS_H__
+#define __ASM_S390_DIS_H__
+
+/* Type of operand */
+#define OPERAND_GPR 0x1 /* Operand printed as %rx */
+#define OPERAND_FPR 0x2 /* Operand printed as %fx */
+#define OPERAND_AR 0x4 /* Operand printed as %ax */
+#define OPERAND_CR 0x8 /* Operand printed as %cx */
+#define OPERAND_VR 0x10 /* Operand printed as %vx */
+#define OPERAND_DISP 0x20 /* Operand printed as displacement */
+#define OPERAND_BASE 0x40 /* Operand printed as base register */
+#define OPERAND_INDEX 0x80 /* Operand printed as index register */
+#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
+
+struct s390_operand {
+ int bits; /* The number of bits in the operand. */
+ int shift; /* The number of bits to shift. */
+ int flags; /* One bit syntax flags. */
+};
+
+struct s390_insn {
+ const char name[5];
+ unsigned char opfrag;
+ unsigned char format;
+};
+
+static inline int insn_length(unsigned char code)
+{
+ return ((((int) code + 64) >> 7) + 1) << 1;
+}
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+ return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
diff --git a/kernel/arch/s390/include/asm/div64.h b/kernel/arch/s390/include/asm/div64.h
new file mode 100644
index 000000000..6cd978cef
--- /dev/null
+++ b/kernel/arch/s390/include/asm/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/kernel/arch/s390/include/asm/dma-mapping.h b/kernel/arch/s390/include/asm/dma-mapping.h
new file mode 100644
index 000000000..9d395961e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/dma-mapping.h
@@ -0,0 +1,90 @@
+#ifndef _ASM_S390_DMA_MAPPING_H
+#define _ASM_S390_DMA_MAPPING_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
+#include <linux/io.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
+
+extern struct dma_map_ops s390_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return &s390_dma_ops;
+}
+
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->dma_supported == NULL)
+ return 1;
+ return dma_ops->dma_supported(dev, mask);
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ debug_dma_mapping_error(dev, dma_addr);
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+ return dma_addr == DMA_ERROR_CODE;
+}
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!ops);
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+
+ return cpu_addr;
+}
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+#endif /* _ASM_S390_DMA_MAPPING_H */
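Conventional use of the coherent-allocation wrappers defined above; the device pointer would come from the PCI probe path on s390, and the zeroing is a choice of this sketch rather than a guarantee of the API.

#include <asm/dma-mapping.h>
#include <linux/gfp.h>		/* GFP_KERNEL */
#include <linux/string.h>	/* memset() */

static void *alloc_dma_ring(struct device *dev, size_t size,
			    dma_addr_t *dma)
{
	void *ring = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	if (!ring)
		return NULL;
	memset(ring, 0, size);	/* start with a zeroed, device-visible ring */
	return ring;
}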
diff --git a/kernel/arch/s390/include/asm/dma.h b/kernel/arch/s390/include/asm/dma.h
new file mode 100644
index 000000000..bb9bdcd20
--- /dev/null
+++ b/kernel/arch/s390/include/asm/dma.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_S390_DMA_H
+#define _ASM_S390_DMA_H
+
+#include <asm/io.h>
+
+/*
+ * MAX_DMA_ADDRESS is ambiguous because on s390 it's completely unrelated
+ * to DMA. It _is_ used for the s390 memory zone split at 2GB caused
+ * by the 31 bit heritage.
+ */
+#define MAX_DMA_ADDRESS 0x80000000
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* _ASM_S390_DMA_H */
diff --git a/kernel/arch/s390/include/asm/eadm.h b/kernel/arch/s390/include/asm/eadm.h
new file mode 100644
index 000000000..67026300c
--- /dev/null
+++ b/kernel/arch/s390/include/asm/eadm.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_S390_EADM_H
+#define _ASM_S390_EADM_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct arqb {
+ u64 data;
+ u16 fmt:4;
+ u16:12;
+ u16 cmd_code;
+ u16:16;
+ u16 msb_count;
+ u32 reserved[12];
+} __packed;
+
+#define ARQB_CMD_MOVE 1
+
+struct arsb {
+ u16 fmt:4;
+ u32:28;
+ u8 ef;
+ u8:8;
+ u8 ecbi;
+ u8:8;
+ u8 fvf;
+ u16:16;
+ u8 eqc;
+ u32:32;
+ u64 fail_msb;
+ u64 fail_aidaw;
+ u64 fail_ms;
+ u64 fail_scm;
+ u32 reserved[4];
+} __packed;
+
+#define EQC_WR_PROHIBIT 22
+
+struct msb {
+ u8 fmt:4;
+ u8 oc:4;
+ u8 flags;
+ u16:12;
+ u16 bs:4;
+ u32 blk_count;
+ u64 data_addr;
+ u64 scm_addr;
+ u64:64;
+} __packed;
+
+struct aidaw {
+ u8 flags;
+ u32 :24;
+ u32 :32;
+ u64 data_addr;
+} __packed;
+
+#define MSB_OC_CLEAR 0
+#define MSB_OC_READ 1
+#define MSB_OC_WRITE 2
+#define MSB_OC_RELEASE 3
+
+#define MSB_FLAG_BNM 0x80
+#define MSB_FLAG_IDA 0x40
+
+#define MSB_BS_4K 0
+#define MSB_BS_1M 1
+
+#define AOB_NR_MSB 124
+
+struct aob {
+ struct arqb request;
+ struct arsb response;
+ struct msb msb[AOB_NR_MSB];
+} __packed __aligned(PAGE_SIZE);
+
+struct aob_rq_header {
+ struct scm_device *scmdev;
+ char data[0];
+};
+
+struct scm_device {
+ u64 address;
+ u64 size;
+ unsigned int nr_max_block;
+ struct device dev;
+ struct {
+ unsigned int persistence:4;
+ unsigned int oper_state:4;
+ unsigned int data_state:4;
+ unsigned int rank:4;
+ unsigned int release:1;
+ unsigned int res_id:8;
+ } __packed attrs;
+};
+
+#define OP_STATE_GOOD 1
+#define OP_STATE_TEMP_ERR 2
+#define OP_STATE_PERM_ERR 3
+
+enum scm_event {SCM_CHANGE, SCM_AVAIL};
+
+struct scm_driver {
+ struct device_driver drv;
+ int (*probe) (struct scm_device *scmdev);
+ int (*remove) (struct scm_device *scmdev);
+ void (*notify) (struct scm_device *scmdev, enum scm_event event);
+ void (*handler) (struct scm_device *scmdev, void *data, int error);
+};
+
+int scm_driver_register(struct scm_driver *scmdrv);
+void scm_driver_unregister(struct scm_driver *scmdrv);
+
+int eadm_start_aob(struct aob *aob);
+void scm_irq_handler(struct aob *aob, int error);
+
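+/*
+ * Illustrative sketch (editor-added, not part of the original patch):
+ * skeleton of a hypothetical storage-class-memory driver hooking into
+ * the interface above; the probe/remove bodies are assumptions.
+ */
+#if 0
+static int example_probe(struct scm_device *scmdev)
+{
+ /* set up request handling for this device */
+ return 0;
+}
+
+static int example_remove(struct scm_device *scmdev)
+{
+ return 0;
+}
+
+static struct scm_driver example_scm_driver = {
+ .drv = {
+ .name = "example_scm",
+ },
+ .probe = example_probe,
+ .remove = example_remove,
+};
+
+/* from module init: scm_driver_register(&example_scm_driver); */
+#endif
+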
+#endif /* _ASM_S390_EADM_H */
diff --git a/kernel/arch/s390/include/asm/ebcdic.h b/kernel/arch/s390/include/asm/ebcdic.h
new file mode 100644
index 000000000..c5befc5a3
--- /dev/null
+++ b/kernel/arch/s390/include/asm/ebcdic.h
@@ -0,0 +1,48 @@
+/*
+ * EBCDIC -> ASCII, ASCII -> EBCDIC conversion routines.
+ *
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _EBCDIC_H
+#define _EBCDIC_H
+
+#ifndef _S390_TYPES_H
+#include <asm/types.h>
+#endif
+
+extern __u8 _ascebc_500[256]; /* ASCII -> EBCDIC 500 conversion table */
+extern __u8 _ebcasc_500[256]; /* EBCDIC 500 -> ASCII conversion table */
+extern __u8 _ascebc[256]; /* ASCII -> EBCDIC conversion table */
+extern __u8 _ebcasc[256]; /* EBCDIC -> ASCII conversion table */
+extern __u8 _ebc_tolower[256]; /* EBCDIC -> lowercase */
+extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */
+
+static inline void
+codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
+{
+ if (nr-- <= 0)
+ return;
+ asm volatile(
+ " bras 1,1f\n"
+ " tr 0(1,%0),0(%2)\n"
+ "0: tr 0(256,%0),0(%2)\n"
+ " la %0,256(%0)\n"
+ "1: ahi %1,-256\n"
+ " jnm 0b\n"
+ " ex %1,0(1)"
+ : "+&a" (addr), "+&a" (nr)
+ : "a" (codepage) : "cc", "memory", "1");
+}
+
+#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
+#define EBCASC(addr,nr) codepage_convert(_ebcasc, addr, nr)
+#define ASCEBC_500(addr,nr) codepage_convert(_ascebc_500, addr, nr)
+#define EBCASC_500(addr,nr) codepage_convert(_ebcasc_500, addr, nr)
+#define EBC_TOLOWER(addr,nr) codepage_convert(_ebc_tolower, addr, nr)
+#define EBC_TOUPPER(addr,nr) codepage_convert(_ebc_toupper, addr, nr)
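+
+/*
+ * Usage sketch (editor-added, illustrative only): convert a buffer in
+ * place between EBCDIC and ASCII; "buf" and "len" are assumed to be
+ * supplied by the caller.
+ */
+#if 0
+static void example_ebcdic_roundtrip(__u8 *buf, unsigned long len)
+{
+ EBCASC(buf, len); /* EBCDIC -> ASCII, in place */
+ /* ... operate on the ASCII text ... */
+ ASCEBC(buf, len); /* ASCII -> EBCDIC, in place */
+}
+#endif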
+
+#endif
+
diff --git a/kernel/arch/s390/include/asm/elf.h b/kernel/arch/s390/include/asm/elf.h
new file mode 100644
index 000000000..3ad48f22d
--- /dev/null
+++ b/kernel/arch/s390/include/asm/elf.h
@@ -0,0 +1,227 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/elf.h"
+ */
+
+#ifndef __ASMS390_ELF_H
+#define __ASMS390_ELF_H
+
+/* s390 relocations defined by the ABIs */
+#define R_390_NONE 0 /* No reloc. */
+#define R_390_8 1 /* Direct 8 bit. */
+#define R_390_12 2 /* Direct 12 bit. */
+#define R_390_16 3 /* Direct 16 bit. */
+#define R_390_32 4 /* Direct 32 bit. */
+#define R_390_PC32 5 /* PC relative 32 bit. */
+#define R_390_GOT12 6 /* 12 bit GOT offset. */
+#define R_390_GOT32 7 /* 32 bit GOT offset. */
+#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
+#define R_390_COPY 9 /* Copy symbol at runtime. */
+#define R_390_GLOB_DAT 10 /* Create GOT entry. */
+#define R_390_JMP_SLOT 11 /* Create PLT entry. */
+#define R_390_RELATIVE 12 /* Adjust by program base. */
+#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
+#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
+#define R_390_GOT16 15 /* 16 bit GOT offset. */
+#define R_390_PC16 16 /* PC relative 16 bit. */
+#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
+#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
+#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
+#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
+#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
+#define R_390_64 22 /* Direct 64 bit. */
+#define R_390_PC64 23 /* PC relative 64 bit. */
+#define R_390_GOT64 24 /* 64 bit GOT offset. */
+#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
+#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
+#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
+#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
+#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
+#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
+#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
+#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
+#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
+#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
+#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
+#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
+#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
+#define R_390_TLS_GDCALL 38 /* Tag for function call in general
+ dynamic TLS code. */
+#define R_390_TLS_LDCALL 39 /* Tag for function call in local
+ dynamic TLS code. */
+#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
+ block. */
+#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
+ block. */
+#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
+#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
+#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS
+ block. */
+#define R_390_20 57 /* Direct 20 bit. */
+#define R_390_GOT20 58 /* 20 bit GOT offset. */
+#define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */
+#define R_390_TLS_GOTIE20 60 /* 20 bit GOT offset for static TLS
+ block offset. */
+/* Keep this the last entry. */
+#define R_390_NUM 61
+
+/* Bits present in AT_HWCAP. */
+#define HWCAP_S390_ESAN3 1
+#define HWCAP_S390_ZARCH 2
+#define HWCAP_S390_STFLE 4
+#define HWCAP_S390_MSA 8
+#define HWCAP_S390_LDISP 16
+#define HWCAP_S390_EIMM 32
+#define HWCAP_S390_DFP 64
+#define HWCAP_S390_HPAGE 128
+#define HWCAP_S390_ETF3EH 256
+#define HWCAP_S390_HIGH_GPRS 512
+#define HWCAP_S390_TE 1024
+#define HWCAP_S390_VXRS 2048
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2MSB
+#define ELF_ARCH EM_S390
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/compat.h>
+#include <asm/syscall.h>
+#include <asm/user.h>
+
+typedef s390_fp_regs elf_fpregset_t;
+typedef s390_regs elf_gregset_t;
+
+typedef s390_fp_regs compat_elf_fpregset_t;
+typedef s390_compat_regs compat_elf_gregset_t;
+
+#include <linux/sched.h> /* for task_struct */
+#include <asm/mmu_context.h>
+
+#include <asm/vdso.h>
+
+extern unsigned int vdso_enabled;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+ (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
+ && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+#define compat_elf_check_arch(x) \
+ (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
+ && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+#define compat_start_thread start_thread31
+
+/* For SVR4/S390 the function pointer to be registered with `atexit` is
+ passed in R14. */
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { \
+ _r->gprs[14] = 0; \
+ } while (0)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. 64-bit
+ tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? \
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+
+extern unsigned long elf_hwcap;
+#define ELF_HWCAP (elf_hwcap)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ On s390 the platform string is the machine type (e.g. "z196"),
+ set up during boot; see elf_platform below. */
+
+#define ELF_PLATFORM_SIZE 8
+extern char elf_platform[];
+#define ELF_PLATFORM (elf_platform)
+
+#ifndef CONFIG_COMPAT
+#define SET_PERSONALITY(ex) \
+do { \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+} while (0)
+#else /* CONFIG_COMPAT */
+#define SET_PERSONALITY(ex) \
+do { \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX | \
+ (current->personality & ~PER_MASK)); \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
+ set_thread_flag(TIF_31BIT); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table_emu; \
+ } else { \
+ clear_thread_flag(TIF_31BIT); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+ } \
+} while (0)
+#endif /* CONFIG_COMPAT */
+
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
+
+#define ARCH_DLINFO \
+do { \
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso_base); \
+} while (0)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+int arch_setup_additional_pages(struct linux_binprm *, int);
+
+void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
+
+#endif
diff --git a/kernel/arch/s390/include/asm/emergency-restart.h b/kernel/arch/s390/include/asm/emergency-restart.h
new file mode 100644
index 000000000..108d8c48e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/kernel/arch/s390/include/asm/etr.h b/kernel/arch/s390/include/asm/etr.h
new file mode 100644
index 000000000..629b79a93
--- /dev/null
+++ b/kernel/arch/s390/include/asm/etr.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+#ifndef __S390_ETR_H
+#define __S390_ETR_H
+
+/* ETR attachment control register */
+struct etr_eacr {
+ unsigned int e0 : 1; /* port 0 stepping control */
+ unsigned int e1 : 1; /* port 1 stepping control */
+ unsigned int _pad0 : 5; /* must be 00100 */
+ unsigned int dp : 1; /* data port control */
+ unsigned int p0 : 1; /* port 0 change recognition control */
+ unsigned int p1 : 1; /* port 1 change recognition control */
+ unsigned int _pad1 : 3; /* must be 000 */
+ unsigned int ea : 1; /* ETR alert control */
+ unsigned int es : 1; /* ETR sync check control */
+ unsigned int sl : 1; /* switch to local control */
+} __attribute__ ((packed));
+
+/* Port state returned by steai */
+enum etr_psc {
+ etr_psc_operational = 0,
+ etr_psc_semi_operational = 1,
+ etr_psc_protocol_error = 4,
+ etr_psc_no_symbols = 8,
+ etr_psc_no_signal = 12,
+ etr_psc_pps_mode = 13
+};
+
+/* Logical port state returned by stetr */
+enum etr_lpsc {
+ etr_lpsc_operational_step = 0,
+ etr_lpsc_operational_alt = 1,
+ etr_lpsc_semi_operational = 2,
+ etr_lpsc_protocol_error = 4,
+ etr_lpsc_no_symbol_sync = 8,
+ etr_lpsc_no_signal = 12,
+ etr_lpsc_pps_mode = 13
+};
+
+/* ETR status words */
+struct etr_esw {
+ struct etr_eacr eacr; /* attachment control register */
+ unsigned int y : 1; /* stepping mode */
+ unsigned int _pad0 : 5; /* must be 00000 */
+ unsigned int p : 1; /* stepping port number */
+ unsigned int q : 1; /* data port number */
+ unsigned int psc0 : 4; /* port 0 state code */
+ unsigned int psc1 : 4; /* port 1 state code */
+} __attribute__ ((packed));
+
+/* Second level data register status word */
+struct etr_slsw {
+ unsigned int vv1 : 1; /* copy of validity bit data frame 1 */
+ unsigned int vv2 : 1; /* copy of validity bit data frame 2 */
+ unsigned int vv3 : 1; /* copy of validity bit data frame 3 */
+ unsigned int vv4 : 1; /* copy of validity bit data frame 4 */
+ unsigned int _pad0 : 19; /* must be all zeroes */
+ unsigned int n : 1; /* EAF port number */
+ unsigned int v1 : 1; /* validity bit ETR data frame 1 */
+ unsigned int v2 : 1; /* validity bit ETR data frame 2 */
+ unsigned int v3 : 1; /* validity bit ETR data frame 3 */
+ unsigned int v4 : 1; /* validity bit ETR data frame 4 */
+ unsigned int _pad1 : 4; /* must be 0000 */
+} __attribute__ ((packed));
+
+/* ETR data frames */
+struct etr_edf1 {
+ unsigned int u : 1; /* untuned bit */
+ unsigned int _pad0 : 1; /* must be 0 */
+ unsigned int r : 1; /* service request bit */
+ unsigned int _pad1 : 4; /* must be 0000 */
+ unsigned int a : 1; /* time adjustment bit */
+ unsigned int net_id : 8; /* ETR network id */
+ unsigned int etr_id : 8; /* id of ETR which sends data frames */
+ unsigned int etr_pn : 8; /* port number of ETR output port */
+} __attribute__ ((packed));
+
+struct etr_edf2 {
+ unsigned int etv : 32; /* Upper 32 bits of TOD. */
+} __attribute__ ((packed));
+
+struct etr_edf3 {
+ unsigned int rc : 8; /* failure reason code */
+ unsigned int _pad0 : 3; /* must be 000 */
+ unsigned int c : 1; /* ETR coupled bit */
+ unsigned int tc : 4; /* ETR type code */
+ unsigned int blto : 8; /* biased local time offset */
+ /* (blto - 128) * 15 = minutes */
+ unsigned int buo : 8; /* biased utc offset */
+ /* (buo - 128) = leap seconds */
+} __attribute__ ((packed));
+
+struct etr_edf4 {
+ unsigned int ed : 8; /* ETS device dependent data */
+ unsigned int _pad0 : 1; /* must be 0 */
+ unsigned int buc : 5; /* biased ut1 correction */
+ /* (buc - 16) * 0.1 seconds */
+ unsigned int em : 6; /* ETS error magnitude */
+ unsigned int dc : 6; /* ETS drift code */
+ unsigned int sc : 6; /* ETS steering code */
+} __attribute__ ((packed));
+
+/*
+ * ETR attachment information block, two formats
+ * format 1 has 4 reserved words with a size of 64 bytes
+ * format 2 has 16 reserved words with a size of 96 bytes
+ */
+struct etr_aib {
+ struct etr_esw esw;
+ struct etr_slsw slsw;
+ unsigned long long tsp;
+ struct etr_edf1 edf1;
+ struct etr_edf2 edf2;
+ struct etr_edf3 edf3;
+ struct etr_edf4 edf4;
+ unsigned int reserved[16];
+} __attribute__ ((packed,aligned(8)));
+
+/* ETR interruption parameter */
+struct etr_irq_parm {
+ unsigned int _pad0 : 8;
+ unsigned int pc0 : 1; /* port 0 state change */
+ unsigned int pc1 : 1; /* port 1 state change */
+ unsigned int _pad1 : 3;
+ unsigned int eai : 1; /* ETR alert indication */
+ unsigned int _pad2 : 18;
+} __attribute__ ((packed));
+
+/* Query TOD offset result */
+struct etr_ptff_qto {
+ unsigned long long physical_clock;
+ unsigned long long tod_offset;
+ unsigned long long logical_tod_offset;
+ unsigned long long tod_epoch_difference;
+} __attribute__ ((packed));
+
+/* Inline assembly helper functions */
+static inline int etr_setr(struct etr_eacr *ctrl)
+{
+ int rc = -EOPNOTSUPP;
+
+ asm volatile(
+ " .insn s,0xb2160000,%1\n"
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "Q" (*ctrl));
+ return rc;
+}
+
+/* Stores a format 1 aib with 64 bytes */
+static inline int etr_stetr(struct etr_aib *aib)
+{
+ int rc = -EOPNOTSUPP;
+
+ asm volatile(
+ " .insn s,0xb2170000,%1\n"
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "Q" (*aib));
+ return rc;
+}
+
+/* Stores a format 2 aib with 96 bytes for specified port */
+static inline int etr_steai(struct etr_aib *aib, unsigned int func)
+{
+ register unsigned int reg0 asm("0") = func;
+ int rc = -EOPNOTSUPP;
+
+ asm volatile(
+ " .insn s,0xb2b30000,%1\n"
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "Q" (*aib), "d" (reg0));
+ return rc;
+}
+
+/* Function codes for the steai instruction. */
+#define ETR_STEAI_STEPPING_PORT 0x10
+#define ETR_STEAI_ALTERNATE_PORT 0x11
+#define ETR_STEAI_PORT_0 0x12
+#define ETR_STEAI_PORT_1 0x13
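+
+/*
+ * Usage sketch (editor-added, illustrative only): store the format 2
+ * attachment information block for port 0 and inspect its state code
+ * (see enum etr_psc above).
+ */
+#if 0
+static int example_port0_state(void)
+{
+ static struct etr_aib aib; /* needs 8 byte alignment, see above */
+
+ if (etr_steai(&aib, ETR_STEAI_PORT_0) != 0)
+ return -EOPNOTSUPP; /* steai is not available */
+ return aib.esw.psc0; /* port 0 state code */
+}
+#endif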
+
+static inline int etr_ptff(void *ptff_block, unsigned int func)
+{
+ register unsigned int reg0 asm("0") = func;
+ register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
+ int rc = -EOPNOTSUPP;
+
+ asm volatile(
+ " .word 0x0104\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (rc), "=m" (ptff_block)
+ : "d" (reg0), "d" (reg1), "m" (ptff_block) : "cc");
+ return rc;
+}
+
+/* Function codes for the ptff instruction. */
+#define ETR_PTFF_QAF 0x00 /* query available functions */
+#define ETR_PTFF_QTO 0x01 /* query tod offset */
+#define ETR_PTFF_QSI 0x02 /* query steering information */
+#define ETR_PTFF_ATO 0x40 /* adjust tod offset */
+#define ETR_PTFF_STO 0x41 /* set tod offset */
+#define ETR_PTFF_SFS 0x42 /* set fine steering rate */
+#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
+
+/* Functions needed by the machine check handler */
+void etr_switch_to_local(void);
+void etr_sync_check(void);
+
+/* STP interruption parameter */
+struct stp_irq_parm {
+ unsigned int _pad0 : 14;
+ unsigned int tsc : 1; /* Timing status change */
+ unsigned int lac : 1; /* Link availability change */
+ unsigned int tcpc : 1; /* Time control parameter change */
+ unsigned int _pad2 : 15;
+} __attribute__ ((packed));
+
+#define STP_OP_SYNC 1
+#define STP_OP_CTRL 3
+
+struct stp_sstpi {
+ unsigned int rsvd0;
+ unsigned int rsvd1 : 8;
+ unsigned int stratum : 8;
+ unsigned int vbits : 16;
+ unsigned int leaps : 16;
+ unsigned int tmd : 4;
+ unsigned int ctn : 4;
+ unsigned int rsvd2 : 3;
+ unsigned int c : 1;
+ unsigned int tst : 4;
+ unsigned int tzo : 16;
+ unsigned int dsto : 16;
+ unsigned int ctrl : 16;
+ unsigned int rsvd3 : 16;
+ unsigned int tto;
+ unsigned int rsvd4;
+ unsigned int ctnid[3];
+ unsigned int rsvd5;
+ unsigned int todoff[4];
+ unsigned int rsvd6[48];
+} __attribute__ ((packed));
+
+/* Functions needed by the machine check handler */
+void stp_sync_check(void);
+void stp_island_check(void);
+
+#endif /* __S390_ETR_H */
diff --git a/kernel/arch/s390/include/asm/exec.h b/kernel/arch/s390/include/asm/exec.h
new file mode 100644
index 000000000..c4a93d632
--- /dev/null
+++ b/kernel/arch/s390/include/asm/exec.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_EXEC_H
+#define __ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* __ASM_EXEC_H */
diff --git a/kernel/arch/s390/include/asm/extmem.h b/kernel/arch/s390/include/asm/extmem.h
new file mode 100644
index 000000000..6276002d7
--- /dev/null
+++ b/kernel/arch/s390/include/asm/extmem.h
@@ -0,0 +1,31 @@
+/*
+ * definitions for external memory segment support
+ * Copyright IBM Corp. 2003
+ */
+
+#ifndef _ASM_S390X_DCSS_H
+#define _ASM_S390X_DCSS_H
+#ifndef __ASSEMBLY__
+
+/* possible values for segment type as returned by segment_info */
+#define SEG_TYPE_SW 0
+#define SEG_TYPE_EW 1
+#define SEG_TYPE_SR 2
+#define SEG_TYPE_ER 3
+#define SEG_TYPE_SN 4
+#define SEG_TYPE_EN 5
+#define SEG_TYPE_SC 6
+#define SEG_TYPE_EWEN 7
+
+#define SEGMENT_SHARED 0
+#define SEGMENT_EXCLUSIVE 1
+
+int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
+void segment_unload(char *name);
+void segment_save(char *name);
+int segment_type (char* name);
+int segment_modify_shared (char *name, int do_nonshared);
+void segment_warning(int rc, char *seg_name);
+
+#endif
+#endif
diff --git a/kernel/arch/s390/include/asm/facility.h b/kernel/arch/s390/include/asm/facility.h
new file mode 100644
index 000000000..0aa6a7ed9
--- /dev/null
+++ b/kernel/arch/s390/include/asm/facility.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_FACILITY_H
+#define __ASM_FACILITY_H
+
+#include <linux/string.h>
+#include <linux/preempt.h>
+#include <asm/lowcore.h>
+
+#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
+
+static inline int __test_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return 0;
+ ptr = (unsigned char *) facilities + (nr >> 3);
+ return (*ptr & (0x80 >> (nr & 7))) != 0;
+}
+
+/*
+ * The test_facility function uses the bit ordering where the MSB is bit 0.
+ * That makes it easier to query facility bits with the bit number as
+ * documented in the Principles of Operation.
+ */
+static inline int test_facility(unsigned long nr)
+{
+ return __test_facility(nr, &S390_lowcore.stfle_fac_list);
+}
+
+/**
+ * stfle - Store facility list extended
+ * @stfle_fac_list: array where facility list can be stored
+ * @size: size of passed in array in double words
+ */
+static inline void stfle(u64 *stfle_fac_list, int size)
+{
+ unsigned long nr;
+
+ preempt_disable();
+ asm volatile(
+ " .insn s,0xb2b10000,0(0)\n" /* stfl */
+ "0:\n"
+ EX_TABLE(0b, 0b)
+ : "+m" (S390_lowcore.stfl_fac_list));
+ nr = 4; /* bytes stored by stfl */
+ memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+ if (S390_lowcore.stfl_fac_list & 0x01000000) {
+ /* More facility bits available with stfle */
+ register unsigned long reg0 asm("0") = size - 1;
+
+ asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+ : "+d" (reg0)
+ : "a" (stfle_fac_list)
+ : "memory", "cc");
+ nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+ }
+ memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+ preempt_enable();
+}
+
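+/*
+ * Usage sketch (editor-added, illustrative only): fetch the extended
+ * facility list into a local buffer and test a bit with the
+ * MSB-is-bit-0 helper above. Facility bit 129 (vector facility) is
+ * used purely as an example.
+ */
+#if 0
+static int example_has_vector_facility(void)
+{
+ u64 fac_list[4]; /* 4 double words = 256 facility bits */
+
+ stfle(fac_list, 4);
+ return __test_facility(129, fac_list);
+}
+#endif
+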
+#endif /* __ASM_FACILITY_H */
diff --git a/kernel/arch/s390/include/asm/fb.h b/kernel/arch/s390/include/asm/fb.h
new file mode 100644
index 000000000..c7df38030
--- /dev/null
+++ b/kernel/arch/s390/include/asm/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/kernel/arch/s390/include/asm/fcx.h b/kernel/arch/s390/include/asm/fcx.h
new file mode 100644
index 000000000..7ecb92b46
--- /dev/null
+++ b/kernel/arch/s390/include/asm/fcx.h
@@ -0,0 +1,311 @@
+/*
+ * Functions for assembling fcx enabled I/O control blocks.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_FCX_H
+#define _ASM_S390_FCX_H _ASM_S390_FCX_H
+
+#include <linux/types.h>
+
+#define TCW_FORMAT_DEFAULT 0
+#define TCW_TIDAW_FORMAT_DEFAULT 0
+#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7))
+#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9)
+#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3)
+
+/**
+ * struct tcw - Transport Control Word (TCW)
+ * @format: TCW format
+ * @flags: TCW flags
+ * @tccbl: Transport-Command-Control-Block Length
+ * @r: Read Operations
+ * @w: Write Operations
+ * @output: Output-Data Address
+ * @input: Input-Data Address
+ * @tsb: Transport-Status-Block Address
+ * @tccb: Transport-Command-Control-Block Address
+ * @output_count: Output Count
+ * @input_count: Input Count
+ * @intrg: Interrogate TCW Address
+ */
+struct tcw {
+ u32 format:2;
+ u32 :6;
+ u32 flags:24;
+ u32 :8;
+ u32 tccbl:6;
+ u32 r:1;
+ u32 w:1;
+ u32 :16;
+ u64 output;
+ u64 input;
+ u64 tsb;
+ u64 tccb;
+ u32 output_count;
+ u32 input_count;
+ u32 :32;
+ u32 :32;
+ u32 :32;
+ u32 intrg;
+} __attribute__ ((packed, aligned(64)));
+
+#define TIDAW_FLAGS_LAST (1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP (1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC (1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4))
+
+/**
+ * struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
+ * @flags: TIDAW flags. Can be an arithmetic OR of the following constants:
+ * %TIDAW_FLAGS_LAST, %TIDAW_FLAGS_SKIP, %TIDAW_FLAGS_DATA_INT,
+ * %TIDAW_FLAGS_TTIC, %TIDAW_FLAGS_INSERT_CBC
+ * @count: Count
+ * @addr: Address
+ */
+struct tidaw {
+ u32 flags:8;
+ u32 :24;
+ u32 count;
+ u64 addr;
+} __attribute__ ((packed, aligned(16)));
+
+/**
+ * struct tsa_iostat - I/O-Status Transport-Status Area (IO-Stat TSA)
+ * @dev_time: Device Time
+ * @def_time: Defer Time
+ * @queue_time: Queue Time
+ * @dev_busy_time: Device-Busy Time
+ * @dev_act_time: Device-Active-Only Time
+ * @sense: Sense Data (if present)
+ */
+struct tsa_iostat {
+ u32 dev_time;
+ u32 def_time;
+ u32 queue_time;
+ u32 dev_busy_time;
+ u32 dev_act_time;
+ u8 sense[32];
+} __attribute__ ((packed));
+
+/**
+ * struct tsa_ddpc - Device-Detected-Program-Check Transport-Status Area (DDPC TSA)
+ * @rc: Reason Code
+ * @rcq: Reason Code Qualifier
+ * @sense: Sense Data (if present)
+ */
+struct tsa_ddpc {
+ u32 :24;
+ u32 rc:8;
+ u8 rcq[16];
+ u8 sense[32];
+} __attribute__ ((packed));
+
+#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
+
+/**
+ * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
+ * @format: Format
+ * @flags: Flags. Can be an arithmetic OR of the following constants:
+ * %TSA_INTRG_FLAGS_CU_STATE_VALID, %TSA_INTRG_FLAGS_DEV_STATE_VALID,
+ * %TSA_INTRG_FLAGS_OP_STATE_VALID
+ * @cu_state: Control-Unit State
+ * @dev_state: Device State
+ * @op_state: Operation State
+ * @sd_info: State-Dependent Information
+ * @dl_id: Device-Level Identifier
+ * @dd_data: Device-Dependent Data
+ */
+struct tsa_intrg {
+ u32 format:8;
+ u32 flags:8;
+ u32 cu_state:8;
+ u32 dev_state:8;
+ u32 op_state:8;
+ u32 :24;
+ u8 sd_info[12];
+ u32 dl_id;
+ u8 dd_data[28];
+} __attribute__ ((packed));
+
+#define TSB_FORMAT_NONE 0
+#define TSB_FORMAT_IOSTAT 1
+#define TSB_FORMAT_DDPC 2
+#define TSB_FORMAT_INTRG 3
+
+#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
+#define TSB_FLAGS_FORMAT(x) ((x) & 7)
+#define TSB_FORMAT(t) ((t)->flags & 7)
+
+/**
+ * struct tsb - Transport-Status Block (TSB)
+ * @length: Length
+ * @flags: Flags. Can be an arithmetic OR of the following constants:
+ * %TSB_FLAGS_DCW_OFFSET_VALID, %TSB_FLAGS_COUNT_VALID, %TSB_FLAGS_CACHE_MISS,
+ * %TSB_FLAGS_TIME_VALID
+ * @dcw_offset: DCW Offset
+ * @count: Count
+ * @tsa: Transport-Status-Area
+ */
+struct tsb {
+ u32 length:8;
+ u32 flags:8;
+ u32 dcw_offset:16;
+ u32 count;
+ u32 :32;
+ union {
+ struct tsa_iostat iostat;
+ struct tsa_ddpc ddpc;
+ struct tsa_intrg intrg;
+ } __attribute__ ((packed)) tsa;
+} __attribute__ ((packed, aligned(8)));
+
+#define DCW_INTRG_FORMAT_DEFAULT 0
+
+#define DCW_INTRG_RC_UNSPECIFIED 0
+#define DCW_INTRG_RC_TIMEOUT 1
+
+#define DCW_INTRG_RCQ_UNSPECIFIED 0
+#define DCW_INTRG_RCQ_PRIMARY 1
+#define DCW_INTRG_RCQ_SECONDARY 2
+
+#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
+
+/**
+ * struct dcw_intrg_data - Interrogate DCW data
+ * @format: Format. Should be %DCW_INTRG_FORMAT_DEFAULT
+ * @rc: Reason Code. Can be one of %DCW_INTRG_RC_UNSPECIFIED,
+ * %DCW_INTRG_RC_TIMEOUT
+ * @rcq: Reason Code Qualifier: Can be one of %DCW_INTRG_RCQ_UNSPECIFIED,
+ * %DCW_INTRG_RCQ_PRIMARY, %DCW_INTRG_RCQ_SECONDARY
+ * @lpm: Logical-Path Mask
+ * @pam: Path-Available Mask
+ * @pim: Path-Installed Mask
+ * @timeout: Timeout
+ * @flags: Flags. Can be an arithmetic OR of %DCW_INTRG_FLAGS_MPM,
+ * %DCW_INTRG_FLAGS_PPR, %DCW_INTRG_FLAGS_CRIT
+ * @time: Time
+ * @prog_id: Program Identifier
+ * @prog_data: Program-Dependent Data
+ */
+struct dcw_intrg_data {
+ u32 format:8;
+ u32 rc:8;
+ u32 rcq:8;
+ u32 lpm:8;
+ u32 pam:8;
+ u32 pim:8;
+ u32 timeout:16;
+ u32 flags:8;
+ u32 :24;
+ u32 :32;
+ u64 time;
+ u64 prog_id;
+ u8 prog_data[0];
+} __attribute__ ((packed));
+
+#define DCW_FLAGS_CC (1 << (7 - 1))
+
+#define DCW_CMD_WRITE 0x01
+#define DCW_CMD_READ 0x02
+#define DCW_CMD_CONTROL 0x03
+#define DCW_CMD_SENSE 0x04
+#define DCW_CMD_SENSE_ID 0xe4
+#define DCW_CMD_INTRG 0x40
+
+/**
+ * struct dcw - Device-Command Word (DCW)
+ * @cmd: Command Code. Can be one of %DCW_CMD_WRITE, %DCW_CMD_READ,
+ * %DCW_CMD_CONTROL, %DCW_CMD_SENSE, %DCW_CMD_SENSE_ID, %DCW_CMD_INTRG
+ * @flags: Flags. Can be an arithmetic OR of %DCW_FLAGS_CC
+ * @cd_count: Control-Data Count
+ * @count: Count
+ * @cd: Control Data
+ */
+struct dcw {
+ u32 cmd:8;
+ u32 flags:8;
+ u32 :8;
+ u32 cd_count:8;
+ u32 count;
+ u8 cd[0];
+} __attribute__ ((packed));
+
+#define TCCB_FORMAT_DEFAULT 0x7f
+#define TCCB_MAX_DCW 30
+#define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \
+ TCCB_MAX_DCW * sizeof(struct dcw) + \
+ sizeof(struct tccb_tcat))
+#define TCCB_SAC_DEFAULT 0x1ffe
+#define TCCB_SAC_INTRG 0x1fff
+
+/**
+ * struct tccb_tcah - Transport-Command-Area Header (TCAH)
+ * @format: Format. Should be %TCCB_FORMAT_DEFAULT
+ * @tcal: Transport-Command-Area Length
+ * @sac: Service-Action Code. Can be one of %TCCB_SAC_DEFAULT, %TCCB_SAC_INTRG
+ * @prio: Priority
+ */
+struct tccb_tcah {
+ u32 format:8;
+ u32 :24;
+ u32 :24;
+ u32 tcal:8;
+ u32 sac:16;
+ u32 :8;
+ u32 prio:8;
+ u32 :32;
+} __attribute__ ((packed));
+
+/**
+ * struct tccb_tcat - Transport-Command-Area Trailer (TCAT)
+ * @count: Transport Count
+ */
+struct tccb_tcat {
+ u32 :32;
+ u32 count;
+} __attribute__ ((packed));
+
+/**
+ * struct tccb - (partial) Transport-Command-Control Block (TCCB)
+ * @tcah: TCAH
+ * @tca: Transport-Command Area
+ */
+struct tccb {
+ struct tccb_tcah tcah;
+ u8 tca[0];
+} __attribute__ ((packed, aligned(8)));
+
+struct tcw *tcw_get_intrg(struct tcw *tcw);
+void *tcw_get_data(struct tcw *tcw);
+struct tccb *tcw_get_tccb(struct tcw *tcw);
+struct tsb *tcw_get_tsb(struct tcw *tcw);
+
+void tcw_init(struct tcw *tcw, int r, int w);
+void tcw_finalize(struct tcw *tcw, int num_tidaws);
+
+void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw);
+void tcw_set_data(struct tcw *tcw, void *data, int use_tidal);
+void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb);
+void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb);
+
+void tccb_init(struct tccb *tccb, size_t tccb_size, u32 sac);
+void tsb_init(struct tsb *tsb);
+struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
+ void *cd, u8 cd_count, u32 count);
+struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
+ void *addr, u32 count);
+
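+/*
+ * Usage sketch (editor-added, illustrative only): assemble a minimal
+ * fcx read request from pre-allocated, correctly aligned control
+ * blocks; allocation and starting the I/O are assumed to happen
+ * elsewhere.
+ */
+#if 0
+static void example_build_read_tcw(struct tcw *tcw, struct tccb *tccb,
+ struct tsb *tsb, void *buf, u32 count)
+{
+ tcw_init(tcw, 1, 0); /* read, no write */
+ tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
+ tccb_add_dcw(tccb, TCCB_MAX_SIZE, DCW_CMD_READ, 0, NULL, 0, count);
+ tsb_init(tsb);
+ tcw_set_tccb(tcw, tccb);
+ tcw_set_tsb(tcw, tsb);
+ tcw_set_data(tcw, buf, 0); /* direct addressing, no tidal */
+ tcw_finalize(tcw, 0); /* no tidaws used */
+}
+#endif
+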
+#endif /* _ASM_S390_FCX_H */
diff --git a/kernel/arch/s390/include/asm/ftrace.h b/kernel/arch/s390/include/asm/ftrace.h
new file mode 100644
index 000000000..836c56290
--- /dev/null
+++ b/kernel/arch/s390/include/asm/ftrace.h
@@ -0,0 +1,84 @@
+#ifndef _ASM_S390_FTRACE_H
+#define _ASM_S390_FTRACE_H
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#ifdef CC_USING_HOTPATCH
+#define MCOUNT_INSN_SIZE 6
+#else
+#define MCOUNT_INSN_SIZE 24
+#define MCOUNT_RETURN_FIXUP 18
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define ftrace_return_address(n) __builtin_return_address(n)
+
+void _mcount(void);
+void ftrace_caller(void);
+
+extern char ftrace_graph_caller_end;
+extern unsigned long ftrace_plt;
+
+struct dyn_arch_ftrace { };
+
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+
+#define KPROBE_ON_FTRACE_NOP 0
+#define KPROBE_ON_FTRACE_CALL 1
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct ftrace_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
+ /* jg .+24 */
+ insn->opc = 0xc0f4;
+ insn->disp = MCOUNT_INSN_SIZE / 2;
+#endif
+#endif
+}
+
+static inline int is_ftrace_nop(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ if (insn->disp == 0)
+ return 1;
+#else
+ if (insn->disp == MCOUNT_INSN_SIZE / 2)
+ return 1;
+#endif
+#endif
+ return 0;
+}
+
+static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
+ unsigned long ip)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ unsigned long target;
+
+ /* brasl r0,ftrace_caller */
+ target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
+ insn->opc = 0xc005;
+ insn->disp = (target - ip) / 2;
+#endif
+}
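+
+/*
+ * Worked example (editor-added): brasl encodes its target as a signed
+ * offset in halfwords, hence the division by 2 above. For ip = 0x1000
+ * and target = 0x2000 the stored displacement is
+ * (0x2000 - 0x1000) / 2 = 0x800 halfwords.
+ */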
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_S390_FTRACE_H */
diff --git a/kernel/arch/s390/include/asm/futex.h b/kernel/arch/s390/include/asm/futex.h
new file mode 100644
index 000000000..a4811aa03
--- /dev/null
+++ b/kernel/arch/s390/include/asm/futex.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_S390_FUTEX_H
+#define _ASM_S390_FUTEX_H
+
+#include <linux/uaccess.h>
+#include <linux/futex.h>
+#include <asm/mmu_context.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
+ asm volatile( \
+ " sacf 256\n" \
+ "0: l %1,0(%6)\n" \
+ "1:"insn \
+ "2: cs %1,%2,0(%6)\n" \
+ "3: jl 1b\n" \
+ " lhi %0,0\n" \
+ "4: sacf 768\n" \
+ EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
+ "=m" (*uaddr) \
+ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+ "m" (*uaddr) : "cc");
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, newval, ret;
+
+ load_kernel_asce();
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ pagefault_disable();
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("lr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("lr %2,%1\nar %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("lr %2,%1\nor %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
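+
+/*
+ * Worked example (editor-added): for an encoded_op built as
+ * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) the decoding above
+ * yields op = FUTEX_OP_ADD, oparg = 1, cmp = FUTEX_OP_CMP_EQ and
+ * cmparg = 0: the word at uaddr is atomically incremented and the
+ * return value reports whether the old value compared equal to 0.
+ */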
+
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret;
+
+ load_kernel_asce();
+ asm volatile(
+ " sacf 256\n"
+ "0: cs %1,%4,0(%5)\n"
+ "1: la %0,0\n"
+ "2: sacf 768\n"
+ EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+ : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+ : "cc", "memory");
+ *uval = oldval;
+ return ret;
+}
+
+#endif /* _ASM_S390_FUTEX_H */
diff --git a/kernel/arch/s390/include/asm/hardirq.h b/kernel/arch/s390/include/asm/hardirq.h
new file mode 100644
index 000000000..b7eabaaef
--- /dev/null
+++ b/kernel/arch/s390/include/asm/hardirq.h
@@ -0,0 +1,26 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ * Derived from "include/asm-i386/hardirq.h"
+ */
+
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <asm/lowcore.h>
+
+#define local_softirq_pending() (S390_lowcore.softirq_pending)
+
+#define __ARCH_IRQ_STAT
+#define __ARCH_HAS_DO_SOFTIRQ
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED
+
+static inline void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+
+#endif /* __ASM_HARDIRQ_H */
diff --git a/kernel/arch/s390/include/asm/hugetlb.h b/kernel/arch/s390/include/asm/hugetlb.h
new file mode 100644
index 000000000..11eae5f55
--- /dev/null
+++ b/kernel/arch/s390/include/asm/hugetlb.h
@@ -0,0 +1,115 @@
+/*
+ * IBM System z Huge TLB Page Support for Kernel.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_HUGETLB_H
+#define _ASM_S390_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+#define is_hugepage_only_range(mm, addr, len) 0
+#define hugetlb_free_pgd_range free_pgd_range
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+#define hugetlb_prefault_arch_hook(mm) do { } while (0)
+#define arch_clear_hugepage_flags(page) do { } while (0)
+
+int arch_prepare_hugepage(struct page *page);
+void arch_release_hugepage(struct page *page);
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ int changed = !pte_same(huge_ptep_get(ptep), pte);
+ if (changed) {
+ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ }
+ return changed;
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
+}
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+ return mk_pte(page, pgprot);
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+ return pte_write(pte);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+ return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+ return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+ return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return pte_modify(pte, newprot);
+}
+
+#endif /* _ASM_S390_HUGETLB_H */
diff --git a/kernel/arch/s390/include/asm/hw_irq.h b/kernel/arch/s390/include/asm/hw_irq.h
new file mode 100644
index 000000000..ee96a8b69
--- /dev/null
+++ b/kernel/arch/s390/include/asm/hw_irq.h
@@ -0,0 +1,11 @@
+#ifndef _HW_IRQ_H
+#define _HW_IRQ_H
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+void __init init_airq_interrupts(void);
+void __init init_cio_interrupts(void);
+void __init init_ext_interrupts(void);
+
+#endif
diff --git a/kernel/arch/s390/include/asm/idals.h b/kernel/arch/s390/include/asm/idals.h
new file mode 100644
index 000000000..a7b2d7504
--- /dev/null
+++ b/kernel/arch/s390/include/asm/idals.h
@@ -0,0 +1,232 @@
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 2000
+ *
+ * History of changes
+ * 07/24/00 new file
+ * 05/04/02 code restructuring.
+ */
+
+#ifndef _S390_IDALS_H
+#define _S390_IDALS_H
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/cio.h>
+#include <asm/uaccess.h>
+
+#define IDA_SIZE_LOG 12 /* 11 for 2k, 12 for 4k */
+#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
+
+/*
+ * Test if an address/length pair needs an idal list.
+ */
+static inline int
+idal_is_needed(void *vaddr, unsigned int length)
+{
+ return ((__pa(vaddr) + length - 1) >> 31) != 0;
+}
+
+
+/*
+ * Return the number of idal words needed for an address/length pair.
+ */
+static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
+{
+ return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
+ (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
+}
+
+/*
+ * Create the list of idal words for an address/length pair.
+ */
+static inline unsigned long *idal_create_words(unsigned long *idaws,
+ void *vaddr, unsigned int length)
+{
+ unsigned long paddr;
+ unsigned int cidaw;
+
+ paddr = __pa(vaddr);
+ cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
+ (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
+ *idaws++ = paddr;
+ paddr &= -IDA_BLOCK_SIZE;
+ while (--cidaw > 0) {
+ paddr += IDA_BLOCK_SIZE;
+ *idaws++ = paddr;
+ }
+ return idaws;
+}
+
+/*
+ * Sets the address of the data in CCW.
+ * If necessary it allocates an IDAL and sets the appropriate flags.
+ */
+static inline int
+set_normalized_cda(struct ccw1 * ccw, void *vaddr)
+{
+ unsigned int nridaws;
+ unsigned long *idal;
+
+ if (ccw->flags & CCW_FLAG_IDA)
+ return -EINVAL;
+ nridaws = idal_nr_words(vaddr, ccw->count);
+ if (nridaws > 0) {
+ idal = kmalloc(nridaws * sizeof(unsigned long),
+ GFP_ATOMIC | GFP_DMA );
+ if (idal == NULL)
+ return -ENOMEM;
+ idal_create_words(idal, vaddr, ccw->count);
+ ccw->flags |= CCW_FLAG_IDA;
+ vaddr = idal;
+ }
+ ccw->cda = (__u32)(unsigned long) vaddr;
+ return 0;
+}
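+
+/*
+ * Usage sketch (editor-added, illustrative only): point a channel
+ * program at a data buffer, falling back to an IDAL when the buffer
+ * crosses the 31-bit boundary; "ccw" and "buf" come from the caller
+ * and ccw->count is assumed to be filled in already.
+ */
+#if 0
+static int example_prepare_ccw(struct ccw1 *ccw, void *buf)
+{
+ int rc;
+
+ rc = set_normalized_cda(ccw, buf); /* may allocate an IDAL */
+ if (rc)
+ return rc;
+ /* ... start the channel program ... */
+ clear_normalized_cda(ccw); /* frees the IDAL, if any */
+ return 0;
+}
+#endif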
+
+/*
+ * Releases any allocated IDAL related to the CCW.
+ */
+static inline void
+clear_normalized_cda(struct ccw1 * ccw)
+{
+ if (ccw->flags & CCW_FLAG_IDA) {
+ kfree((void *)(unsigned long) ccw->cda);
+ ccw->flags &= ~CCW_FLAG_IDA;
+ }
+ ccw->cda = 0;
+}
+
+/*
+ * Idal buffer extension
+ */
+struct idal_buffer {
+ size_t size;
+ size_t page_order;
+ void *data[0];
+};
+
+/*
+ * Allocate an idal buffer
+ */
+static inline struct idal_buffer *
+idal_buffer_alloc(size_t size, int page_order)
+{
+ struct idal_buffer *ib;
+ int nr_chunks, nr_ptrs, i;
+
+ nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
+ nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
+ ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
+ GFP_DMA | GFP_KERNEL);
+ if (ib == NULL)
+ return ERR_PTR(-ENOMEM);
+ ib->size = size;
+ ib->page_order = page_order;
+ for (i = 0; i < nr_ptrs; i++) {
+ if ((i & (nr_chunks - 1)) != 0) {
+ ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
+ continue;
+ }
+ ib->data[i] = (void *)
+ __get_free_pages(GFP_KERNEL, page_order);
+ if (ib->data[i] != NULL)
+ continue;
+ /* Not enough memory */
+ while (i >= nr_chunks) {
+ i -= nr_chunks;
+ free_pages((unsigned long) ib->data[i],
+ ib->page_order);
+ }
+ kfree(ib);
+ return ERR_PTR(-ENOMEM);
+ }
+ return ib;
+}
+
+/*
+ * Free an idal buffer.
+ */
+static inline void
+idal_buffer_free(struct idal_buffer *ib)
+{
+ int nr_chunks, nr_ptrs, i;
+
+ nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
+ nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
+ for (i = 0; i < nr_ptrs; i += nr_chunks)
+ free_pages((unsigned long) ib->data[i], ib->page_order);
+ kfree(ib);
+}
+
+/*
+ * Test if an idal list is really needed.
+ */
+static inline int
+__idal_buffer_is_needed(struct idal_buffer *ib)
+{
+ return ib->size > (4096ul << ib->page_order) ||
+ idal_is_needed(ib->data[0], ib->size);
+}
+
+/*
+ * Set channel data address to idal buffer.
+ */
+static inline void
+idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
+{
+ if (__idal_buffer_is_needed(ib)) {
+ /* setup idals */
+ ccw->cda = (u32)(addr_t) ib->data;
+ ccw->flags |= CCW_FLAG_IDA;
+ } else
+ /* we do not need idals - use direct addressing */
+ ccw->cda = (u32)(addr_t) ib->data[0];
+ ccw->count = ib->size;
+}
+
+/*
+ * Copy count bytes from an idal buffer to user memory
+ */
+static inline size_t
+idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
+{
+ size_t left;
+ int i;
+
+ BUG_ON(count > ib->size);
+ for (i = 0; count > IDA_BLOCK_SIZE; i++) {
+ left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
+ if (left)
+ return left + count - IDA_BLOCK_SIZE;
+ to = (void __user *) to + IDA_BLOCK_SIZE;
+ count -= IDA_BLOCK_SIZE;
+ }
+ return copy_to_user(to, ib->data[i], count);
+}
+
+/*
+ * Copy count bytes from user memory to an idal buffer
+ */
+static inline size_t
+idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
+{
+ size_t left;
+ int i;
+
+ BUG_ON(count > ib->size);
+ for (i = 0; count > IDA_BLOCK_SIZE; i++) {
+ left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
+ if (left)
+ return left + count - IDA_BLOCK_SIZE;
+ from = (void __user *) from + IDA_BLOCK_SIZE;
+ count -= IDA_BLOCK_SIZE;
+ }
+ return copy_from_user(ib->data[i], from, count);
+}
+
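+/*
+ * Usage sketch (editor-added, illustrative only): allocate an idal
+ * buffer for a read, attach it to a CCW and copy the result to user
+ * space after I/O completion (the I/O itself is elided).
+ */
+#if 0
+static long example_read_via_idal(struct ccw1 *ccw, void __user *to,
+ size_t count)
+{
+ struct idal_buffer *ib;
+
+ ib = idal_buffer_alloc(count, 0); /* order-0 page chunks */
+ if (IS_ERR(ib))
+ return PTR_ERR(ib);
+ idal_buffer_set_cda(ib, ccw);
+ /* ... perform the channel I/O ... */
+ if (idal_buffer_to_user(ib, to, count)) {
+ idal_buffer_free(ib);
+ return -EFAULT;
+ }
+ idal_buffer_free(ib);
+ return count;
+}
+#endif
+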
+#endif
diff --git a/kernel/arch/s390/include/asm/idle.h b/kernel/arch/s390/include/asm/idle.h
new file mode 100644
index 000000000..113cd963d
--- /dev/null
+++ b/kernel/arch/s390/include/asm/idle.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright IBM Corp. 2014
+ *
+ * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _S390_IDLE_H
+#define _S390_IDLE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/seqlock.h>
+
+struct s390_idle_data {
+ seqcount_t seqcount;
+ unsigned long long idle_count;
+ unsigned long long idle_time;
+ unsigned long long clock_idle_enter;
+ unsigned long long clock_idle_exit;
+ unsigned long long timer_idle_enter;
+ unsigned long long timer_idle_exit;
+};
+
+extern struct device_attribute dev_attr_idle_count;
+extern struct device_attribute dev_attr_idle_time_us;
+
+#endif /* _S390_IDLE_H */
diff --git a/kernel/arch/s390/include/asm/io.h b/kernel/arch/s390/include/asm/io.h
new file mode 100644
index 000000000..30fd5c846
--- /dev/null
+++ b/kernel/arch/s390/include/asm/io.h
@@ -0,0 +1,77 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/io.h"
+ */
+
+#ifndef _S390_IO_H
+#define _S390_IO_H
+
+#include <linux/kernel.h>
+#include <asm/page.h>
+#include <asm/pci_io.h>
+
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+void *xlate_dev_mem_ptr(phys_addr_t phys);
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#define IO_SPACE_LIMIT 0
+
+#ifdef CONFIG_PCI
+
+#define ioremap_nocache(addr, size) ioremap(addr, size)
+#define ioremap_wc ioremap_nocache
+
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+ return (void __iomem *) offset;
+}
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+}
+
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return NULL;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
+/*
+ * s390 needs a private implementation of pci_iomap since ioremap with its
+ * offset parameter isn't sufficient. That's because BAR spaces are not
+ * disjoint on s390, so we need the bar parameter of pci_iomap to find
+ * the corresponding device and create the mapping cookie.
+ */
+#define pci_iomap pci_iomap
+#define pci_iounmap pci_iounmap
+
+#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
+#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
+#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
+
+#define __raw_readb zpci_read_u8
+#define __raw_readw zpci_read_u16
+#define __raw_readl zpci_read_u32
+#define __raw_readq zpci_read_u64
+#define __raw_writeb zpci_write_u8
+#define __raw_writew zpci_write_u16
+#define __raw_writel zpci_write_u32
+#define __raw_writeq zpci_write_u64
+
+#endif /* CONFIG_PCI */
+
+#include <asm-generic/io.h>
+
+#endif
diff --git a/kernel/arch/s390/include/asm/ipl.h b/kernel/arch/s390/include/asm/ipl.h
new file mode 100644
index 000000000..ece606c2e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/ipl.h
@@ -0,0 +1,182 @@
+/*
+ * s390 (re)ipl support
+ *
+ * Copyright IBM Corp. 2007
+ */
+
+#ifndef _ASM_S390_IPL_H
+#define _ASM_S390_IPL_H
+
+#include <asm/lowcore.h>
+#include <asm/types.h>
+#include <asm/cio.h>
+#include <asm/setup.h>
+
+#define IPL_PARMBLOCK_ORIGIN 0x2000
+
+#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
+ sizeof(struct ipl_block_fcp))
+
+#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
+
+#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
+ sizeof(struct ipl_block_ccw))
+
+#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
+
+#define IPL_MAX_SUPPORTED_VERSION (0)
+
+#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
+ IPL_PARMBLOCK_ORIGIN)
+#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
+
+struct ipl_list_hdr {
+ u32 len;
+ u8 reserved1[3];
+ u8 version;
+ u32 blk0_len;
+ u8 pbt;
+ u8 flags;
+ u16 reserved2;
+ u8 loadparm[8];
+} __attribute__((packed));
+
+struct ipl_block_fcp {
+ u8 reserved1[305-1];
+ u8 opt;
+ u8 reserved2[3];
+ u16 reserved3;
+ u16 devno;
+ u8 reserved4[4];
+ u64 wwpn;
+ u64 lun;
+ u32 bootprog;
+ u8 reserved5[12];
+ u64 br_lba;
+ u32 scp_data_len;
+ u8 reserved6[260];
+ u8 scp_data[];
+} __attribute__((packed));
+
+#define DIAG308_VMPARM_SIZE 64
+#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
+ offsetof(struct ipl_block_fcp, scp_data)))
+
+struct ipl_block_ccw {
+ u8 reserved1[84];
+ u8 reserved2[2];
+ u16 devno;
+ u8 vm_flags;
+ u8 reserved3[3];
+ u32 vm_parm_len;
+ u8 nss_name[8];
+ u8 vm_parm[DIAG308_VMPARM_SIZE];
+ u8 reserved4[8];
+} __attribute__((packed));
+
+struct ipl_parameter_block {
+ struct ipl_list_hdr hdr;
+ union {
+ struct ipl_block_fcp fcp;
+ struct ipl_block_ccw ccw;
+ } ipl_info;
+} __attribute__((packed,aligned(4096)));
+
+/*
+ * IPL validity flags
+ */
+extern u32 ipl_flags;
+extern u32 dump_prefix_page;
+
+struct dump_save_areas {
+ struct save_area_ext **areas;
+ int count;
+};
+
+extern struct dump_save_areas dump_save_areas;
+struct save_area_ext *dump_save_area_create(int cpu);
+
+extern void do_reipl(void);
+extern void do_halt(void);
+extern void do_poff(void);
+extern void ipl_save_parameters(void);
+extern void ipl_update_parameters(void);
+extern size_t append_ipl_vmparm(char *, size_t);
+extern size_t append_ipl_scpdata(char *, size_t);
+
+enum {
+ IPL_DEVNO_VALID = 1,
+ IPL_PARMBLOCK_VALID = 2,
+ IPL_NSS_VALID = 4,
+};
+
+enum ipl_type {
+ IPL_TYPE_UNKNOWN = 1,
+ IPL_TYPE_CCW = 2,
+ IPL_TYPE_FCP = 4,
+ IPL_TYPE_FCP_DUMP = 8,
+ IPL_TYPE_NSS = 16,
+};
+
+struct ipl_info
+{
+ enum ipl_type type;
+ union {
+ struct {
+ struct ccw_dev_id dev_id;
+ } ccw;
+ struct {
+ struct ccw_dev_id dev_id;
+ u64 wwpn;
+ u64 lun;
+ } fcp;
+ struct {
+ char name[NSS_NAME_SIZE + 1];
+ } nss;
+ } data;
+};
+
+extern struct ipl_info ipl_info;
+extern void setup_ipl(void);
+
+/*
+ * DIAG 308 support
+ */
+enum diag308_subcode {
+ DIAG308_REL_HSA = 2,
+ DIAG308_IPL = 3,
+ DIAG308_DUMP = 4,
+ DIAG308_SET = 5,
+ DIAG308_STORE = 6,
+};
+
+enum diag308_ipl_type {
+ DIAG308_IPL_TYPE_FCP = 0,
+ DIAG308_IPL_TYPE_CCW = 2,
+};
+
+enum diag308_opt {
+ DIAG308_IPL_OPT_IPL = 0x10,
+ DIAG308_IPL_OPT_DUMP = 0x20,
+};
+
+enum diag308_flags {
+ DIAG308_FLAGS_LP_VALID = 0x80,
+};
+
+enum diag308_vm_flags {
+ DIAG308_VM_FLAGS_NSS_VALID = 0x80,
+ DIAG308_VM_FLAGS_VP_VALID = 0x40,
+};
+
+enum diag308_rc {
+ DIAG308_RC_OK = 0x0001,
+ DIAG308_RC_NOCONFIG = 0x0102,
+};
+
+extern int diag308(unsigned long subcode, void *addr);
+extern void diag308_reset(void);
+extern void store_status(void);
+extern void lgr_info_log(void);
+
+#endif /* _ASM_S390_IPL_H */
diff --git a/kernel/arch/s390/include/asm/irq.h b/kernel/arch/s390/include/asm/irq.h
new file mode 100644
index 000000000..ff95d15a2
--- /dev/null
+++ b/kernel/arch/s390/include/asm/irq.h
@@ -0,0 +1,106 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+#define EXT_INTERRUPT 0
+#define IO_INTERRUPT 1
+#define THIN_INTERRUPT 2
+
+#define NR_IRQS_BASE 3
+
+#ifdef CONFIG_PCI_NR_MSI
+# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
+#else
+# define NR_IRQS NR_IRQS_BASE
+#endif
+
+/* External interruption codes */
+#define EXT_IRQ_INTERRUPT_KEY 0x0040
+#define EXT_IRQ_CLK_COMP 0x1004
+#define EXT_IRQ_CPU_TIMER 0x1005
+#define EXT_IRQ_WARNING_TRACK 0x1007
+#define EXT_IRQ_MALFUNC_ALERT 0x1200
+#define EXT_IRQ_EMERGENCY_SIG 0x1201
+#define EXT_IRQ_EXTERNAL_CALL 0x1202
+#define EXT_IRQ_TIMING_ALERT 0x1406
+#define EXT_IRQ_MEASURE_ALERT 0x1407
+#define EXT_IRQ_SERVICE_SIG 0x2401
+#define EXT_IRQ_CP_SERVICE 0x2603
+#define EXT_IRQ_IUCV 0x4000
+
+#ifndef __ASSEMBLY__
+
+#include <linux/hardirq.h>
+#include <linux/percpu.h>
+#include <linux/cache.h>
+#include <linux/types.h>
+
+enum interruption_class {
+ IRQEXT_CLK,
+ IRQEXT_EXC,
+ IRQEXT_EMS,
+ IRQEXT_TMR,
+ IRQEXT_TLA,
+ IRQEXT_PFL,
+ IRQEXT_DSD,
+ IRQEXT_VRT,
+ IRQEXT_SCP,
+ IRQEXT_IUC,
+ IRQEXT_CMS,
+ IRQEXT_CMC,
+ IRQEXT_CMR,
+ IRQEXT_FTP,
+ IRQIO_CIO,
+ IRQIO_QAI,
+ IRQIO_DAS,
+ IRQIO_C15,
+ IRQIO_C70,
+ IRQIO_TAP,
+ IRQIO_VMR,
+ IRQIO_LCS,
+ IRQIO_CTC,
+ IRQIO_APB,
+ IRQIO_ADM,
+ IRQIO_CSC,
+ IRQIO_PCI,
+ IRQIO_MSI,
+ IRQIO_VIR,
+ IRQIO_VAI,
+ NMI_NMI,
+ CPU_RST,
+ NR_ARCH_IRQS
+};
+
+struct irq_stat {
+ unsigned int irqs[NR_ARCH_IRQS];
+};
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
+
+static __always_inline void inc_irq_stat(enum interruption_class irq)
+{
+ __this_cpu_inc(irq_stat.irqs[irq]);
+}
+
+struct ext_code {
+ unsigned short subcode;
+ unsigned short code;
+};
+
+typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
+
+int register_external_irq(u16 code, ext_int_handler_t handler);
+int unregister_external_irq(u16 code, ext_int_handler_t handler);
+
+enum irq_subclass {
+ IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
+ IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
+};
+
+void irq_subclass_register(enum irq_subclass subclass);
+void irq_subclass_unregister(enum irq_subclass subclass);
+
+#define irq_canonicalize(irq) (irq)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IRQ_H */
diff --git a/kernel/arch/s390/include/asm/irq_regs.h b/kernel/arch/s390/include/asm/irq_regs.h
new file mode 100644
index 000000000..3dd9c0b70
--- /dev/null
+++ b/kernel/arch/s390/include/asm/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/kernel/arch/s390/include/asm/irqflags.h b/kernel/arch/s390/include/asm/irqflags.h
new file mode 100644
index 000000000..16aa0c779
--- /dev/null
+++ b/kernel/arch/s390/include/asm/irqflags.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright IBM Corp. 2006, 2010
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_IRQFLAGS_H
+#define __ASM_IRQFLAGS_H
+
+#include <linux/types.h>
+
+/* store then OR system mask. */
+#define __arch_local_irq_stosm(__or) \
+({ \
+ unsigned long __mask; \
+ asm volatile( \
+ " stosm %0,%1" \
+ : "=Q" (__mask) : "i" (__or) : "memory"); \
+ __mask; \
+})
+
+/* store then AND system mask. */
+#define __arch_local_irq_stnsm(__and) \
+({ \
+ unsigned long __mask; \
+ asm volatile( \
+ " stnsm %0,%1" \
+ : "=Q" (__mask) : "i" (__and) : "memory"); \
+ __mask; \
+})
+
+/* set system mask. */
+static inline notrace void __arch_local_irq_ssm(unsigned long flags)
+{
+ asm volatile("ssm %0" : : "Q" (flags) : "memory");
+}
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+ return __arch_local_irq_stnsm(0xff);
+}
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+ return __arch_local_irq_stnsm(0xfc);
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+ arch_local_irq_save();
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+ __arch_local_irq_stosm(0x03);
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+ __arch_local_irq_ssm(flags);
+}
+
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (3UL << (BITS_PER_LONG - 8)));
+}
+
+static inline notrace bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* __ASM_IRQFLAGS_H */
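These arch_ hooks implement the generic <linux/irqflags.h> contract; kernel code normally calls the generic wrappers, which expand to the routines above. A sketch of the resulting pattern with the mask semantics spelled out:

    unsigned long flags;

    flags = arch_local_irq_save();  /* stnsm 0xfc: mask off external and I/O bits */
    /* critical section: external and I/O interrupts cannot be taken here */
    arch_local_irq_restore(flags);  /* ssm: reload the previously saved mask */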
diff --git a/kernel/arch/s390/include/asm/isc.h b/kernel/arch/s390/include/asm/isc.h
new file mode 100644
index 000000000..68d7d6830
--- /dev/null
+++ b/kernel/arch/s390/include/asm/isc.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_S390_ISC_H
+#define _ASM_S390_ISC_H
+
+#include <linux/types.h>
+
+/*
+ * I/O interruption subclasses used by drivers.
+ * Please add all used iscs here so that it is possible to distribute
+ * isc usage between drivers.
+ * Reminder: 0 is highest priority, 7 lowest.
+ */
+#define MAX_ISC 7
+
+/* Regular I/O interrupts. */
+#define IO_SCH_ISC 3 /* regular I/O subchannels */
+#define CONSOLE_ISC 1 /* console I/O subchannel */
+#define EADM_SCH_ISC 4 /* EADM subchannels */
+#define CHSC_SCH_ISC 7 /* CHSC subchannels */
+/* Adapter interrupts. */
+#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
+#define PCI_ISC 2 /* PCI I/O subchannels */
+#define AP_ISC 6 /* adjunct processor (crypto) devices */
+
+/* Functions for registration of I/O interruption subclasses */
+void isc_register(unsigned int isc);
+void isc_unregister(unsigned int isc);
+
+#endif /* _ASM_S390_ISC_H */
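A driver is expected to claim its subclass before enabling interrupts on it and to release it on teardown. A minimal sketch:

    isc_register(IO_SCH_ISC);       /* claim ISC 3 for regular I/O subchannels */
    /* ... enable subchannels or adapter interrupts on this ISC ... */
    isc_unregister(IO_SCH_ISC);     /* release it again on driver teardown */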
diff --git a/kernel/arch/s390/include/asm/itcw.h b/kernel/arch/s390/include/asm/itcw.h
new file mode 100644
index 000000000..fb1bedd3d
--- /dev/null
+++ b/kernel/arch/s390/include/asm/itcw.h
@@ -0,0 +1,30 @@
+/*
+ * Functions for incremental construction of fcx enabled I/O control blocks.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_ITCW_H
+#define _ASM_S390_ITCW_H
+
+#include <linux/types.h>
+#include <asm/fcx.h>
+
+#define ITCW_OP_READ 0
+#define ITCW_OP_WRITE 1
+
+struct itcw;
+
+struct tcw *itcw_get_tcw(struct itcw *itcw);
+size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws);
+struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
+ int max_tidaws, int intrg_max_tidaws);
+struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
+ u8 cd_count, u32 count);
+struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr,
+ u32 count);
+void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal);
+void itcw_finalize(struct itcw *itcw);
+
+#endif /* _ASM_S390_ITCW_H */
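The intended call sequence is incremental: size the buffer, initialize the ITCW, append one DCW plus its TIDAWs, then finalize. A sketch assuming a caller-provided, suitably aligned buffer; the command code 0x06 and the counts are illustrative only:

    #include <linux/err.h>
    #include <asm/itcw.h>

    size_t size = itcw_calc_size(0, 2, 0);  /* no interrogate, two tidaws */
    struct itcw *itcw = itcw_init(buf, size, ITCW_OP_READ, 0, 2, 0);

    if (!IS_ERR(itcw)) {
            itcw_add_dcw(itcw, 0x06 /* assumed read command */, 0, NULL, 0, 2 * 4096);
            itcw_add_tidaw(itcw, 0, page1, 4096);
            itcw_add_tidaw(itcw, 0, page2, 4096);
            itcw_finalize(itcw);
            /* itcw_get_tcw(itcw) now yields the TCW to start the I/O with */
    }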
diff --git a/kernel/arch/s390/include/asm/jump_label.h b/kernel/arch/s390/include/asm/jump_label.h
new file mode 100644
index 000000000..69972b795
--- /dev/null
+++ b/kernel/arch/s390/include/asm/jump_label.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_S390_JUMP_LABEL_H
+#define _ASM_S390_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 6
+#define JUMP_LABEL_NOP_OFFSET 2
+
+/*
+ * We use a brcl 0,2 instruction for jump labels at compile time so that
+ * it can easily be distinguished from a hotpatch-generated instruction.
+ */
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ".balign 8\n"
+ ".quad 0b, %l[label], %0\n"
+ ".popsection\n"
+ : : "X" (key) : : label);
+ return false;
+label:
+ return true;
+}
+
+typedef unsigned long jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif
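arch_static_branch() is the s390 backend for the static-key API of this kernel generation; a sketch of the pattern it enables (the key name and slow path are hypothetical):

    #include <linux/jump_label.h>

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;

    if (static_key_false(&my_key))          /* compiles to the brcl 0,2 nop */
            do_unlikely_work();             /* hypothetical slow path */

    static_key_slow_inc(&my_key);           /* patches the nop into a branch */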
diff --git a/kernel/arch/s390/include/asm/kdebug.h b/kernel/arch/s390/include/asm/kdebug.h
new file mode 100644
index 000000000..5c1abd476
--- /dev/null
+++ b/kernel/arch/s390/include/asm/kdebug.h
@@ -0,0 +1,27 @@
+#ifndef _S390_KDEBUG_H
+#define _S390_KDEBUG_H
+
+/*
+ * Feb 2006 Ported to s390 <grundym@us.ibm.com>
+ */
+
+struct pt_regs;
+
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_BPT,
+ DIE_SSTEP,
+ DIE_PANIC,
+ DIE_NMI,
+ DIE_DIE,
+ DIE_NMIWATCHDOG,
+ DIE_KERNELDEBUG,
+ DIE_TRAP,
+ DIE_GPF,
+ DIE_CALL,
+ DIE_NMI_IPI,
+};
+
+extern void die(struct pt_regs *, const char *);
+
+#endif
diff --git a/kernel/arch/s390/include/asm/kexec.h b/kernel/arch/s390/include/asm/kexec.h
new file mode 100644
index 000000000..2f924bc30
--- /dev/null
+++ b/kernel/arch/s390/include/asm/kexec.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright IBM Corp. 2005
+ *
+ * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
+ *
+ */
+
+#ifndef _S390_KEXEC_H
+#define _S390_KEXEC_H
+
+#include <asm/processor.h>
+#include <asm/page.h>
+/*
+ * KEXEC_SOURCE_MEMORY_LIMIT is the highest page get_free_page can return,
+ * i.e. the highest page that is mapped directly into kernel memory,
+ * so that kmap is not required.
+ */
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control pages */
+/* Not more than 2GB */
+#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
+
+/* Allocate control page with GFP_DMA */
+#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+
+/* Maximum address we can use for the crash control pages */
+#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
+
+/* Allocate one page for the pdp and the second for the code */
+#define KEXEC_CONTROL_PAGE_SIZE 4096
+
+/* Alignment of crashkernel memory */
+#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE
+
+/* The native architecture */
+#define KEXEC_ARCH KEXEC_ARCH_S390
+
+/*
+ * Size for s390x ELF notes per CPU
+ *
+ * Seven notes plus zero note at the end: prstatus, fpregset, timer,
+ * tod_cmp, tod_reg, control regs, and prefix
+ */
+#define KEXEC_NOTE_BYTES \
+ (ALIGN(sizeof(struct elf_note), 4) * 8 + \
+ ALIGN(sizeof("CORE"), 4) * 7 + \
+ ALIGN(sizeof(struct elf_prstatus), 4) + \
+ ALIGN(sizeof(elf_fpregset_t), 4) + \
+ ALIGN(sizeof(u64), 4) + \
+ ALIGN(sizeof(u64), 4) + \
+ ALIGN(sizeof(u32), 4) + \
+ ALIGN(sizeof(u64) * 16, 4) + \
+ ALIGN(sizeof(u32), 4) \
+ )
+
+/* Provide a dummy definition to avoid build failures. */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs) { }
+
+#endif /*_S390_KEXEC_H */
diff --git a/kernel/arch/s390/include/asm/kmap_types.h b/kernel/arch/s390/include/asm/kmap_types.h
new file mode 100644
index 000000000..0a8862233
--- /dev/null
+++ b/kernel/arch/s390/include/asm/kmap_types.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+#include <asm-generic/kmap_types.h>
+
+#endif
diff --git a/kernel/arch/s390/include/asm/kprobes.h b/kernel/arch/s390/include/asm/kprobes.h
new file mode 100644
index 000000000..b47ad3b64
--- /dev/null
+++ b/kernel/arch/s390/include/asm/kprobes.h
@@ -0,0 +1,94 @@
+#ifndef _ASM_S390_KPROBES_H
+#define _ASM_S390_KPROBES_H
+/*
+ * Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corp. 2002, 2006
+ *
+ * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ * Probes initial implementation ( includes suggestions from
+ * Rusty Russell).
+ * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
+ * <ananth@in.ibm.com>
+ * 2005-Dec Used as a template for s390 by Mike Grundy
+ * <grundym@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct pt_regs;
+struct kprobe;
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION 0x0002
+
+/* Maximum instruction size is 3 (16bit) halfwords: */
+#define MAX_INSN_SIZE 0x0003
+#define MAX_STACK_SIZE 64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+ (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+ ? (MAX_STACK_SIZE) \
+ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define kretprobe_blacklist_size 0
+
+#define KPROBE_SWAP_INST 0x10
+
+#define FIXUP_PSW_NORMAL 0x08
+#define FIXUP_BRANCH_NOT_TAKEN 0x04
+#define FIXUP_RETURN_REGISTER 0x02
+#define FIXUP_NOT_REQUIRED 0x01
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+ /* copy of original instruction */
+ kprobe_opcode_t *insn;
+ unsigned int is_ftrace_insn : 1;
+};
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long kprobe_saved_imask;
+ unsigned long kprobe_saved_ctl[3];
+ struct prev_kprobe prev_kprobe;
+ struct pt_regs jprobe_saved_regs;
+ kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+void kretprobe_trampoline(void);
+
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+int probe_is_prohibited_opcode(u16 *insn);
+int probe_get_fixup_type(u16 *insn);
+int probe_is_insn_relative_long(u16 *insn);
+
+#define flush_insn_slot(p) do { } while (0)
+
+#endif /* _ASM_S390_KPROBES_H */
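These definitions back the generic kprobes API from <linux/kprobes.h>; the usual registration pattern looks like the sketch below (probe target and handler names are assumptions):

    #include <linux/kprobes.h>

    static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            /* runs before the probed instruction; 0 = continue normally */
            return 0;
    }

    static struct kprobe my_kp = {
            .symbol_name = "do_exit",       /* assumed probe target */
            .pre_handler = my_pre_handler,
    };

    /* in module init: rc = register_kprobe(&my_kp); */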
diff --git a/kernel/arch/s390/include/asm/kvm_host.h b/kernel/arch/s390/include/asm/kvm_host.h
new file mode 100644
index 000000000..905007eea
--- /dev/null
+++ b/kernel/arch/s390/include/asm/kvm_host.h
@@ -0,0 +1,642 @@
+/*
+ * definition for kernel virtual machines on s390
+ *
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+
+#ifndef ASM_KVM_HOST_H
+#define ASM_KVM_HOST_H
+
+#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <asm/debug.h>
+#include <asm/cpu.h>
+#include <asm/isc.h>
+
+#define KVM_MAX_VCPUS 64
+#define KVM_USER_MEM_SLOTS 32
+
+/*
+ * These seem to be used for allocating ->chip in the routing table,
+ * which we don't use. 4096 is an out-of-thin-air value. If we need
+ * to look at ->chip later on, we'll need to revisit this.
+ */
+#define KVM_NR_IRQCHIPS 1
+#define KVM_IRQCHIP_NUM_PINS 4096
+
+#define SIGP_CTRL_C 0x80
+#define SIGP_CTRL_SCN_MASK 0x3f
+
+struct sca_entry {
+ __u8 reserved0;
+ __u8 sigp_ctrl;
+ __u16 reserved[3];
+ __u64 sda;
+ __u64 reserved2[2];
+} __attribute__((packed));
+
+union ipte_control {
+ unsigned long val;
+ struct {
+ unsigned long k : 1;
+ unsigned long kh : 31;
+ unsigned long kg : 32;
+ };
+};
+
+struct sca_block {
+ union ipte_control ipte_control;
+ __u64 reserved[5];
+ __u64 mcn;
+ __u64 reserved2;
+ struct sca_entry cpu[64];
+} __attribute__((packed));
+
+#define CPUSTAT_STOPPED 0x80000000
+#define CPUSTAT_WAIT 0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT 0x04000000
+#define CPUSTAT_IO_INT 0x02000000
+#define CPUSTAT_EXT_INT 0x01000000
+#define CPUSTAT_RUNNING 0x00800000
+#define CPUSTAT_RETAINED 0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB 0x00010000
+#define CPUSTAT_RRF 0x00008000
+#define CPUSTAT_SLSV 0x00004000
+#define CPUSTAT_SLSR 0x00002000
+#define CPUSTAT_ZARCH 0x00000800
+#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_SM 0x00000080
+#define CPUSTAT_IBS 0x00000040
+#define CPUSTAT_G 0x00000008
+#define CPUSTAT_GED 0x00000004
+#define CPUSTAT_J 0x00000002
+#define CPUSTAT_P 0x00000001
+
+struct kvm_s390_sie_block {
+ atomic_t cpuflags; /* 0x0000 */
+ __u32 : 1; /* 0x0004 */
+ __u32 prefix : 18;
+ __u32 : 1;
+ __u32 ibc : 12;
+ __u8 reserved08[4]; /* 0x0008 */
+#define PROG_IN_SIE (1<<0)
+ __u32 prog0c; /* 0x000c */
+ __u8 reserved10[16]; /* 0x0010 */
+#define PROG_BLOCK_SIE 0x00000001
+ atomic_t prog20; /* 0x0020 */
+ __u8 reserved24[4]; /* 0x0024 */
+ __u64 cputm; /* 0x0028 */
+ __u64 ckc; /* 0x0030 */
+ __u64 epoch; /* 0x0038 */
+ __u8 reserved40[4]; /* 0x0040 */
+#define LCTL_CR0 0x8000
+#define LCTL_CR6 0x0200
+#define LCTL_CR9 0x0040
+#define LCTL_CR10 0x0020
+#define LCTL_CR11 0x0010
+#define LCTL_CR14 0x0002
+ __u16 lctl; /* 0x0044 */
+ __s16 icpua; /* 0x0046 */
+#define ICTL_PINT 0x20000000
+#define ICTL_LPSW 0x00400000
+#define ICTL_STCTL 0x00040000
+#define ICTL_ISKE 0x00004000
+#define ICTL_SSKE 0x00002000
+#define ICTL_RRBE 0x00001000
+#define ICTL_TPROT 0x00000200
+ __u32 ictl; /* 0x0048 */
+ __u32 eca; /* 0x004c */
+#define ICPT_INST 0x04
+#define ICPT_PROGI 0x08
+#define ICPT_INSTPROGI 0x0C
+#define ICPT_OPEREXC 0x2C
+#define ICPT_PARTEXEC 0x38
+#define ICPT_IOINST 0x40
+ __u8 icptcode; /* 0x0050 */
+ __u8 icptstatus; /* 0x0051 */
+ __u16 ihcpu; /* 0x0052 */
+ __u8 reserved54[2]; /* 0x0054 */
+ __u16 ipa; /* 0x0056 */
+ __u32 ipb; /* 0x0058 */
+ __u32 scaoh; /* 0x005c */
+ __u8 reserved60; /* 0x0060 */
+ __u8 ecb; /* 0x0061 */
+ __u8 ecb2; /* 0x0062 */
+#define ECB3_AES 0x04
+#define ECB3_DEA 0x08
+ __u8 ecb3; /* 0x0063 */
+ __u32 scaol; /* 0x0064 */
+ __u8 reserved68[4]; /* 0x0068 */
+ __u32 todpr; /* 0x006c */
+ __u8 reserved70[32]; /* 0x0070 */
+ psw_t gpsw; /* 0x0090 */
+ __u64 gg14; /* 0x00a0 */
+ __u64 gg15; /* 0x00a8 */
+ __u8 reservedb0[20]; /* 0x00b0 */
+ __u16 extcpuaddr; /* 0x00c4 */
+ __u16 eic; /* 0x00c6 */
+ __u32 reservedc8; /* 0x00c8 */
+ __u16 pgmilc; /* 0x00cc */
+ __u16 iprcc; /* 0x00ce */
+ __u32 dxc; /* 0x00d0 */
+ __u16 mcn; /* 0x00d4 */
+ __u8 perc; /* 0x00d6 */
+ __u8 peratmid; /* 0x00d7 */
+ __u64 peraddr; /* 0x00d8 */
+ __u8 eai; /* 0x00e0 */
+ __u8 peraid; /* 0x00e1 */
+ __u8 oai; /* 0x00e2 */
+ __u8 armid; /* 0x00e3 */
+ __u8 reservede4[4]; /* 0x00e4 */
+ __u64 tecmc; /* 0x00e8 */
+ __u8 reservedf0[12]; /* 0x00f0 */
+#define CRYCB_FORMAT1 0x00000001
+#define CRYCB_FORMAT2 0x00000003
+ __u32 crycbd; /* 0x00fc */
+ __u64 gcr[16]; /* 0x0100 */
+ __u64 gbea; /* 0x0180 */
+ __u8 reserved188[24]; /* 0x0188 */
+ __u32 fac; /* 0x01a0 */
+ __u8 reserved1a4[20]; /* 0x01a4 */
+ __u64 cbrlo; /* 0x01b8 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
+ __u64 pp; /* 0x01de */
+ __u8 reserved1e6[2]; /* 0x01e6 */
+ __u64 itdba; /* 0x01e8 */
+ __u8 reserved1f0[16]; /* 0x01f0 */
+} __attribute__((packed));
+
+struct kvm_s390_itdb {
+ __u8 data[256];
+} __packed;
+
+struct kvm_s390_vregs {
+ __vector128 vrs[32];
+ __u8 reserved200[512]; /* for future vector expansion */
+} __packed;
+
+struct sie_page {
+ struct kvm_s390_sie_block sie_block;
+ __u8 reserved200[1024]; /* 0x0200 */
+ struct kvm_s390_itdb itdb; /* 0x0600 */
+ __u8 reserved700[1280]; /* 0x0700 */
+ struct kvm_s390_vregs vregs; /* 0x0c00 */
+} __packed;
+
+struct kvm_vcpu_stat {
+ u32 exit_userspace;
+ u32 exit_null;
+ u32 exit_external_request;
+ u32 exit_external_interrupt;
+ u32 exit_stop_request;
+ u32 exit_validity;
+ u32 exit_instruction;
+ u32 halt_successful_poll;
+ u32 halt_wakeup;
+ u32 instruction_lctl;
+ u32 instruction_lctlg;
+ u32 instruction_stctl;
+ u32 instruction_stctg;
+ u32 exit_program_interruption;
+ u32 exit_instr_and_program;
+ u32 deliver_external_call;
+ u32 deliver_emergency_signal;
+ u32 deliver_service_signal;
+ u32 deliver_virtio_interrupt;
+ u32 deliver_stop_signal;
+ u32 deliver_prefix_signal;
+ u32 deliver_restart_signal;
+ u32 deliver_program_int;
+ u32 deliver_io_int;
+ u32 exit_wait_state;
+ u32 instruction_pfmf;
+ u32 instruction_stidp;
+ u32 instruction_spx;
+ u32 instruction_stpx;
+ u32 instruction_stap;
+ u32 instruction_storage_key;
+ u32 instruction_ipte_interlock;
+ u32 instruction_stsch;
+ u32 instruction_chsc;
+ u32 instruction_stsi;
+ u32 instruction_stfl;
+ u32 instruction_tprot;
+ u32 instruction_essa;
+ u32 instruction_sigp_sense;
+ u32 instruction_sigp_sense_running;
+ u32 instruction_sigp_external_call;
+ u32 instruction_sigp_emergency;
+ u32 instruction_sigp_cond_emergency;
+ u32 instruction_sigp_start;
+ u32 instruction_sigp_stop;
+ u32 instruction_sigp_stop_store_status;
+ u32 instruction_sigp_store_status;
+ u32 instruction_sigp_store_adtl_status;
+ u32 instruction_sigp_arch;
+ u32 instruction_sigp_prefix;
+ u32 instruction_sigp_restart;
+ u32 instruction_sigp_init_cpu_reset;
+ u32 instruction_sigp_cpu_reset;
+ u32 instruction_sigp_unknown;
+ u32 diagnose_10;
+ u32 diagnose_44;
+ u32 diagnose_9c;
+};
+
+#define PGM_OPERATION 0x01
+#define PGM_PRIVILEGED_OP 0x02
+#define PGM_EXECUTE 0x03
+#define PGM_PROTECTION 0x04
+#define PGM_ADDRESSING 0x05
+#define PGM_SPECIFICATION 0x06
+#define PGM_DATA 0x07
+#define PGM_FIXED_POINT_OVERFLOW 0x08
+#define PGM_FIXED_POINT_DIVIDE 0x09
+#define PGM_DECIMAL_OVERFLOW 0x0a
+#define PGM_DECIMAL_DIVIDE 0x0b
+#define PGM_HFP_EXPONENT_OVERFLOW 0x0c
+#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d
+#define PGM_HFP_SIGNIFICANCE 0x0e
+#define PGM_HFP_DIVIDE 0x0f
+#define PGM_SEGMENT_TRANSLATION 0x10
+#define PGM_PAGE_TRANSLATION 0x11
+#define PGM_TRANSLATION_SPEC 0x12
+#define PGM_SPECIAL_OPERATION 0x13
+#define PGM_OPERAND 0x15
+#define PGM_TRACE_TABEL 0x16
+#define PGM_VECTOR_PROCESSING 0x1b
+#define PGM_SPACE_SWITCH 0x1c
+#define PGM_HFP_SQUARE_ROOT 0x1d
+#define PGM_PC_TRANSLATION_SPEC 0x1f
+#define PGM_AFX_TRANSLATION 0x20
+#define PGM_ASX_TRANSLATION 0x21
+#define PGM_LX_TRANSLATION 0x22
+#define PGM_EX_TRANSLATION 0x23
+#define PGM_PRIMARY_AUTHORITY 0x24
+#define PGM_SECONDARY_AUTHORITY 0x25
+#define PGM_LFX_TRANSLATION 0x26
+#define PGM_LSX_TRANSLATION 0x27
+#define PGM_ALET_SPECIFICATION 0x28
+#define PGM_ALEN_TRANSLATION 0x29
+#define PGM_ALE_SEQUENCE 0x2a
+#define PGM_ASTE_VALIDITY 0x2b
+#define PGM_ASTE_SEQUENCE 0x2c
+#define PGM_EXTENDED_AUTHORITY 0x2d
+#define PGM_LSTE_SEQUENCE 0x2e
+#define PGM_ASTE_INSTANCE 0x2f
+#define PGM_STACK_FULL 0x30
+#define PGM_STACK_EMPTY 0x31
+#define PGM_STACK_SPECIFICATION 0x32
+#define PGM_STACK_TYPE 0x33
+#define PGM_STACK_OPERATION 0x34
+#define PGM_ASCE_TYPE 0x38
+#define PGM_REGION_FIRST_TRANS 0x39
+#define PGM_REGION_SECOND_TRANS 0x3a
+#define PGM_REGION_THIRD_TRANS 0x3b
+#define PGM_MONITOR 0x40
+#define PGM_PER 0x80
+#define PGM_CRYPTO_OPERATION 0x119
+
+/* irq types in order of priority */
+enum irq_types {
+ IRQ_PEND_MCHK_EX = 0,
+ IRQ_PEND_SVC,
+ IRQ_PEND_PROG,
+ IRQ_PEND_MCHK_REP,
+ IRQ_PEND_EXT_IRQ_KEY,
+ IRQ_PEND_EXT_MALFUNC,
+ IRQ_PEND_EXT_EMERGENCY,
+ IRQ_PEND_EXT_EXTERNAL,
+ IRQ_PEND_EXT_CLOCK_COMP,
+ IRQ_PEND_EXT_CPU_TIMER,
+ IRQ_PEND_EXT_TIMING,
+ IRQ_PEND_EXT_SERVICE,
+ IRQ_PEND_EXT_HOST,
+ IRQ_PEND_PFAULT_INIT,
+ IRQ_PEND_PFAULT_DONE,
+ IRQ_PEND_VIRTIO,
+ IRQ_PEND_IO_ISC_0,
+ IRQ_PEND_IO_ISC_1,
+ IRQ_PEND_IO_ISC_2,
+ IRQ_PEND_IO_ISC_3,
+ IRQ_PEND_IO_ISC_4,
+ IRQ_PEND_IO_ISC_5,
+ IRQ_PEND_IO_ISC_6,
+ IRQ_PEND_IO_ISC_7,
+ IRQ_PEND_SIGP_STOP,
+ IRQ_PEND_RESTART,
+ IRQ_PEND_SET_PREFIX,
+ IRQ_PEND_COUNT
+};
+
+/* We have 2M for virtio device descriptor pages. The smallest descriptor
+ * per device is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
+ */
+#define KVM_S390_MAX_VIRTIO_IRQS 87381
+
+/*
+ * Repressible (non-floating) machine check interrupts
+ * subclass bits in MCIC
+ */
+#define MCHK_EXTD_BIT 58
+#define MCHK_DEGR_BIT 56
+#define MCHK_WARN_BIT 55
+#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
+ (1UL << MCHK_EXTD_BIT) | \
+ (1UL << MCHK_WARN_BIT))
+
+/* Exigent machine check interrupts subclass bits in MCIC */
+#define MCHK_SD_BIT 63
+#define MCHK_PD_BIT 62
+#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))
+
+#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY) | \
+ (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
+ (1UL << IRQ_PEND_EXT_CPU_TIMER) | \
+ (1UL << IRQ_PEND_EXT_MALFUNC) | \
+ (1UL << IRQ_PEND_EXT_EMERGENCY) | \
+ (1UL << IRQ_PEND_EXT_EXTERNAL) | \
+ (1UL << IRQ_PEND_EXT_TIMING) | \
+ (1UL << IRQ_PEND_EXT_HOST) | \
+ (1UL << IRQ_PEND_EXT_SERVICE) | \
+ (1UL << IRQ_PEND_VIRTIO) | \
+ (1UL << IRQ_PEND_PFAULT_INIT) | \
+ (1UL << IRQ_PEND_PFAULT_DONE))
+
+#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
+ (1UL << IRQ_PEND_IO_ISC_1) | \
+ (1UL << IRQ_PEND_IO_ISC_2) | \
+ (1UL << IRQ_PEND_IO_ISC_3) | \
+ (1UL << IRQ_PEND_IO_ISC_4) | \
+ (1UL << IRQ_PEND_IO_ISC_5) | \
+ (1UL << IRQ_PEND_IO_ISC_6) | \
+ (1UL << IRQ_PEND_IO_ISC_7))
+
+#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
+ (1UL << IRQ_PEND_MCHK_EX))
+
+struct kvm_s390_interrupt_info {
+ struct list_head list;
+ u64 type;
+ union {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
+ struct kvm_s390_mchk_info mchk;
+ };
+};
+
+struct kvm_s390_irq_payload {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
+ struct kvm_s390_mchk_info mchk;
+};
+
+struct kvm_s390_local_interrupt {
+ spinlock_t lock;
+ struct kvm_s390_float_interrupt *float_int;
+ struct swait_head *wq;
+ atomic_t *cpuflags;
+ DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
+ struct kvm_s390_irq_payload irq;
+ unsigned long pending_irqs;
+};
+
+#define FIRQ_LIST_IO_ISC_0 0
+#define FIRQ_LIST_IO_ISC_1 1
+#define FIRQ_LIST_IO_ISC_2 2
+#define FIRQ_LIST_IO_ISC_3 3
+#define FIRQ_LIST_IO_ISC_4 4
+#define FIRQ_LIST_IO_ISC_5 5
+#define FIRQ_LIST_IO_ISC_6 6
+#define FIRQ_LIST_IO_ISC_7 7
+#define FIRQ_LIST_PFAULT 8
+#define FIRQ_LIST_VIRTIO 9
+#define FIRQ_LIST_COUNT 10
+#define FIRQ_CNTR_IO 0
+#define FIRQ_CNTR_SERVICE 1
+#define FIRQ_CNTR_VIRTIO 2
+#define FIRQ_CNTR_PFAULT 3
+#define FIRQ_MAX_COUNT 4
+
+struct kvm_s390_float_interrupt {
+ unsigned long pending_irqs;
+ spinlock_t lock;
+ struct list_head lists[FIRQ_LIST_COUNT];
+ int counters[FIRQ_MAX_COUNT];
+ struct kvm_s390_mchk_info mchk;
+ struct kvm_s390_ext_info srv_signal;
+ int next_rr_cpu;
+ unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+};
+
+struct kvm_hw_wp_info_arch {
+ unsigned long addr;
+ unsigned long phys_addr;
+ int len;
+ char *old_data;
+};
+
+struct kvm_hw_bp_info_arch {
+ unsigned long addr;
+ int len;
+};
+
+/*
+ * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
+ * Further KVM_GUESTDBG flags which can be used from userspace can be found in
+ * arch/s390/include/uapi/asm/kvm.h
+ */
+#define KVM_GUESTDBG_EXIT_PENDING 0x10000000
+
+#define guestdbg_enabled(vcpu) \
+ (vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
+#define guestdbg_sstep_enabled(vcpu) \
+ (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+#define guestdbg_hw_bp_enabled(vcpu) \
+ (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
+ (vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
+
+struct kvm_guestdbg_info_arch {
+ unsigned long cr0;
+ unsigned long cr9;
+ unsigned long cr10;
+ unsigned long cr11;
+ struct kvm_hw_bp_info_arch *hw_bp_info;
+ struct kvm_hw_wp_info_arch *hw_wp_info;
+ int nr_hw_bp;
+ int nr_hw_wp;
+ unsigned long last_bp;
+};
+
+struct kvm_vcpu_arch {
+ struct kvm_s390_sie_block *sie_block;
+ s390_fp_regs host_fpregs;
+ unsigned int host_acrs[NUM_ACRS];
+ s390_fp_regs guest_fpregs;
+ struct kvm_s390_vregs *host_vregs;
+ struct kvm_s390_local_interrupt local_int;
+ struct hrtimer ckc_timer;
+ struct kvm_s390_pgm_info pgm;
+ union {
+ struct cpuid cpu_id;
+ u64 stidp_data;
+ };
+ struct gmap *gmap;
+ struct kvm_guestdbg_info_arch guestdbg;
+ unsigned long pfault_token;
+ unsigned long pfault_select;
+ unsigned long pfault_compare;
+};
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct s390_map_info {
+ struct list_head list;
+ __u64 guest_addr;
+ __u64 addr;
+ struct page *page;
+};
+
+struct s390_io_adapter {
+ unsigned int id;
+ int isc;
+ bool maskable;
+ bool masked;
+ bool swap;
+ struct rw_semaphore maps_lock;
+ struct list_head maps;
+ atomic_t nr_maps;
+};
+
+#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
+#define MAX_S390_ADAPTER_MAPS 256
+
+/* maximum size of facility list and facility mask is 2k bytes */
+#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
+#define S390_ARCH_FAC_LIST_SIZE_U64 \
+ (S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
+#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
+#define S390_ARCH_FAC_MASK_SIZE_U64 \
+ (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
+
+struct kvm_s390_fac {
+ /* facility list requested by guest */
+ __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+ /* facility mask supported by kvm & hosting machine */
+ __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
+};
+
+struct kvm_s390_cpu_model {
+ struct kvm_s390_fac *fac;
+ struct cpuid cpu_id;
+ unsigned short ibc;
+};
+
+struct kvm_s390_crypto {
+ struct kvm_s390_crypto_cb *crycb;
+ __u32 crycbd;
+ __u8 aes_kw;
+ __u8 dea_kw;
+};
+
+struct kvm_s390_crypto_cb {
+ __u8 reserved00[72]; /* 0x0000 */
+ __u8 dea_wrapping_key_mask[24]; /* 0x0048 */
+ __u8 aes_wrapping_key_mask[32]; /* 0x0060 */
+ __u8 reserved80[128]; /* 0x0080 */
+};
+
+struct kvm_arch {
+ struct sca_block *sca;
+ debug_info_t *dbf;
+ struct kvm_s390_float_interrupt float_int;
+ struct kvm_device *flic;
+ struct gmap *gmap;
+ int css_support;
+ int use_irqchip;
+ int use_cmma;
+ int user_cpu_state_ctrl;
+ int user_sigp;
+ int user_stsi;
+ struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
+ wait_queue_head_t ipte_wq;
+ int ipte_lock_count;
+ struct mutex ipte_mutex;
+ spinlock_t start_stop_lock;
+ struct kvm_s390_cpu_model model;
+ struct kvm_s390_crypto crypto;
+ u64 epoch;
+};
+
+#define KVM_HVA_ERR_BAD (-1UL)
+#define KVM_HVA_ERR_RO_BAD (-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+ return IS_ERR_VALUE(addr);
+}
+
+#define ASYNC_PF_PER_VCPU 64
+struct kvm_arch_async_pf {
+ unsigned long pfault_token;
+};
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+
+extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+extern char sie_exit;
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_check_processor_compat(void *rtn) {}
+static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot) {}
+
+#endif
diff --git a/kernel/arch/s390/include/asm/kvm_para.h b/kernel/arch/s390/include/asm/kvm_para.h
new file mode 100644
index 000000000..e0f842308
--- /dev/null
+++ b/kernel/arch/s390/include/asm/kvm_para.h
@@ -0,0 +1,157 @@
+/*
+ * definition for paravirtual devices on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+/*
+ * Hypercalls for KVM on s390. The calling convention is similar to the
+ * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
+ * as hypercall number and R7 as parameter 6. The return value is
+ * written to R2. We use the diagnose instruction as hypercall. To avoid
+ * conflicts with existing diagnoses for LPAR and z/VM, we do not use
+ * the instruction-encoded number, but specify the number in R1 and
+ * use 0x500 as the KVM hypercall.
+ *
+ * Copyright IBM Corp. 2007,2008
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#ifndef __S390_KVM_PARA_H
+#define __S390_KVM_PARA_H
+
+#include <uapi/asm/kvm_para.h>
+
+
+
+static inline long kvm_hypercall0(unsigned long nr)
+{
+ register unsigned long __nr asm("1") = nr;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr): "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
+ unsigned long p2)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
+ : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3) : "memory", "cc");
+ return __rc;
+}
+
+
+static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5,
+ unsigned long p6)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register unsigned long __p6 asm("7") = p6;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
+ : "memory", "cc");
+ return __rc;
+}
+
+/* kvm on s390 is always paravirtualization enabled */
+static inline int kvm_para_available(void)
+{
+ return 1;
+}
+
+/* No feature bits are currently assigned for kvm on s390 */
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+#endif /* __S390_KVM_PARA_H */
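Putting the convention from the header comment together: the guest loads the hypercall number into R1 and parameters into R2 onwards, then issues diagnose 0x500; the result comes back in R2. A hypothetical guest-side sketch; the hypercall number and parameters are made up for illustration:

    #define MY_HCALL_NR 1UL         /* assumed number; defined by the host ABI */

    long rc = kvm_hypercall2(MY_HCALL_NR,
                             (unsigned long) cookie,    /* placed in R2 */
                             (unsigned long) len);      /* placed in R3 */
    if (rc < 0)
            pr_warn("hypercall failed: %ld\n", rc);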
diff --git a/kernel/arch/s390/include/asm/linkage.h b/kernel/arch/s390/include/asm/linkage.h
new file mode 100644
index 000000000..fc8a82847
--- /dev/null
+++ b/kernel/arch/s390/include/asm/linkage.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#include <linux/stringify.h>
+
+#define __ALIGN .align 4, 0x07
+#define __ALIGN_STR __stringify(__ALIGN)
+
+#endif
diff --git a/kernel/arch/s390/include/asm/livepatch.h b/kernel/arch/s390/include/asm/livepatch.h
new file mode 100644
index 000000000..7aa799134
--- /dev/null
+++ b/kernel/arch/s390/include/asm/livepatch.h
@@ -0,0 +1,43 @@
+/*
+ * livepatch.h - s390-specific Kernel Live Patching Core
+ *
+ * Copyright (c) 2013-2015 SUSE
+ * Authors: Jiri Kosina
+ * Vojtech Pavlik
+ * Jiri Slaby
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef ASM_LIVEPATCH_H
+#define ASM_LIVEPATCH_H
+
+#include <linux/module.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+ return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+ type, unsigned long loc, unsigned long value)
+{
+ /* not supported yet */
+ return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->psw.addr = ip;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif
diff --git a/kernel/arch/s390/include/asm/local.h b/kernel/arch/s390/include/asm/local.h
new file mode 100644
index 000000000..c11c530f7
--- /dev/null
+++ b/kernel/arch/s390/include/asm/local.h
@@ -0,0 +1 @@
+#include <asm-generic/local.h>
diff --git a/kernel/arch/s390/include/asm/local64.h b/kernel/arch/s390/include/asm/local64.h
new file mode 100644
index 000000000..36c93b5cc
--- /dev/null
+++ b/kernel/arch/s390/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/kernel/arch/s390/include/asm/lowcore.h b/kernel/arch/s390/include/asm/lowcore.h
new file mode 100644
index 000000000..663f23e37
--- /dev/null
+++ b/kernel/arch/s390/include/asm/lowcore.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright IBM Corp. 1999, 2012
+ * Author(s): Hartmut Penner <hp@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Denis Joseph Barrow,
+ */
+
+#ifndef _ASM_S390_LOWCORE_H
+#define _ASM_S390_LOWCORE_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+#include <asm/cpu.h>
+#include <asm/types.h>
+
+#define LC_ORDER 1
+#define LC_PAGES 2
+
+struct save_area {
+ u64 fp_regs[16];
+ u64 gp_regs[16];
+ u8 psw[16];
+ u8 pad1[8];
+ u32 pref_reg;
+ u32 fp_ctrl_reg;
+ u8 pad2[4];
+ u32 tod_reg;
+ u64 timer;
+ u64 clk_cmp;
+ u8 pad3[8];
+ u32 acc_regs[16];
+ u64 ctrl_regs[16];
+} __packed;
+
+struct save_area_ext {
+ struct save_area sa;
+ __vector128 vx_regs[32];
+};
+
+struct _lowcore {
+ __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
+ __u32 ipl_parmblock_ptr; /* 0x0014 */
+ __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
+ __u32 ext_params; /* 0x0080 */
+ __u16 ext_cpu_addr; /* 0x0084 */
+ __u16 ext_int_code; /* 0x0086 */
+ __u16 svc_ilc; /* 0x0088 */
+ __u16 svc_code; /* 0x008a */
+ __u16 pgm_ilc; /* 0x008c */
+ __u16 pgm_code; /* 0x008e */
+ __u32 data_exc_code; /* 0x0090 */
+ __u16 mon_class_num; /* 0x0094 */
+ __u8 per_code; /* 0x0096 */
+ __u8 per_atmid; /* 0x0097 */
+ __u64 per_address; /* 0x0098 */
+ __u8 exc_access_id; /* 0x00a0 */
+ __u8 per_access_id; /* 0x00a1 */
+ __u8 op_access_id; /* 0x00a2 */
+ __u8 ar_mode_id; /* 0x00a3 */
+ __u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
+ __u64 trans_exc_code; /* 0x00a8 */
+ __u64 monitor_code; /* 0x00b0 */
+ __u16 subchannel_id; /* 0x00b8 */
+ __u16 subchannel_nr; /* 0x00ba */
+ __u32 io_int_parm; /* 0x00bc */
+ __u32 io_int_word; /* 0x00c0 */
+ __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
+ __u32 stfl_fac_list; /* 0x00c8 */
+ __u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */
+ __u32 mcck_interruption_code[2]; /* 0x00e8 */
+ __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
+ __u32 external_damage_code; /* 0x00f4 */
+ __u64 failing_storage_address; /* 0x00f8 */
+ __u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
+ __u64 breaking_event_addr; /* 0x0110 */
+ __u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
+ psw_t restart_old_psw; /* 0x0120 */
+ psw_t external_old_psw; /* 0x0130 */
+ psw_t svc_old_psw; /* 0x0140 */
+ psw_t program_old_psw; /* 0x0150 */
+ psw_t mcck_old_psw; /* 0x0160 */
+ psw_t io_old_psw; /* 0x0170 */
+ __u8 pad_0x0180[0x01a0-0x0180]; /* 0x0180 */
+ psw_t restart_psw; /* 0x01a0 */
+ psw_t external_new_psw; /* 0x01b0 */
+ psw_t svc_new_psw; /* 0x01c0 */
+ psw_t program_new_psw; /* 0x01d0 */
+ psw_t mcck_new_psw; /* 0x01e0 */
+ psw_t io_new_psw; /* 0x01f0 */
+
+ /* Save areas. */
+ __u64 save_area_sync[8]; /* 0x0200 */
+ __u64 save_area_async[8]; /* 0x0240 */
+ __u64 save_area_restart[1]; /* 0x0280 */
+
+ /* CPU flags. */
+ __u64 cpu_flags; /* 0x0288 */
+
+ /* Return psws. */
+ psw_t return_psw; /* 0x0290 */
+ psw_t return_mcck_psw; /* 0x02a0 */
+
+ /* CPU accounting and timing values. */
+ __u64 sync_enter_timer; /* 0x02b0 */
+ __u64 async_enter_timer; /* 0x02b8 */
+ __u64 mcck_enter_timer; /* 0x02c0 */
+ __u64 exit_timer; /* 0x02c8 */
+ __u64 user_timer; /* 0x02d0 */
+ __u64 system_timer; /* 0x02d8 */
+ __u64 steal_timer; /* 0x02e0 */
+ __u64 last_update_timer; /* 0x02e8 */
+ __u64 last_update_clock; /* 0x02f0 */
+ __u64 int_clock; /* 0x02f8 */
+ __u64 mcck_clock; /* 0x0300 */
+ __u64 clock_comparator; /* 0x0308 */
+
+ /* Current process. */
+ __u64 current_task; /* 0x0310 */
+ __u64 thread_info; /* 0x0318 */
+ __u64 kernel_stack; /* 0x0320 */
+
+ /* Interrupt, panic and restart stack. */
+ __u64 async_stack; /* 0x0328 */
+ __u64 panic_stack; /* 0x0330 */
+ __u64 restart_stack; /* 0x0338 */
+
+ /* Restart function and parameter. */
+ __u64 restart_fn; /* 0x0340 */
+ __u64 restart_data; /* 0x0348 */
+ __u64 restart_source; /* 0x0350 */
+
+ /* Address space pointer. */
+ __u64 kernel_asce; /* 0x0358 */
+ __u64 user_asce; /* 0x0360 */
+ __u64 current_pid; /* 0x0368 */
+
+ /* SMP info area */
+ __u32 cpu_nr; /* 0x0370 */
+ __u32 softirq_pending; /* 0x0374 */
+ __u64 percpu_offset; /* 0x0378 */
+ __u64 vdso_per_cpu_data; /* 0x0380 */
+ __u64 machine_flags; /* 0x0388 */
+ __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
+ __u64 gmap; /* 0x0398 */
+ __u32 spinlock_lockval; /* 0x03a0 */
+	__u8	pad_0x03a4[0x0400-0x03a4];	/* 0x03a4 */
+
+ /* Per cpu primary space access list */
+ __u32 paste[16]; /* 0x0400 */
+
+	__u8	pad_0x0440[0x0e00-0x0440];	/* 0x0440 */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+ * block. Dump tools need IPIB for IPL after dump.
+ * Note: do not change the position of any fields in 0x0e00-0x0f00
+ */
+ __u64 ipib; /* 0x0e00 */
+ __u32 ipib_checksum; /* 0x0e08 */
+ __u64 vmcore_info; /* 0x0e0c */
+ __u8 pad_0x0e14[0x0e18-0x0e14]; /* 0x0e14 */
+ __u64 os_info; /* 0x0e18 */
+ __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
+
+ /* Extended facility list */
+ __u64 stfle_fac_list[32]; /* 0x0f00 */
+ __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
+
+ /* Pointer to vector register save area */
+ __u64 vector_save_area_addr; /* 0x11b0 */
+
+ /* 64 bit extparam used for pfault/diag 250: defined by architecture */
+ __u64 ext_params2; /* 0x11B8 */
+ __u8 pad_0x11c0[0x1200-0x11C0]; /* 0x11C0 */
+
+ /* CPU register save area: defined by architecture */
+ __u64 floating_pt_save_area[16]; /* 0x1200 */
+ __u64 gpregs_save_area[16]; /* 0x1280 */
+ psw_t psw_save_area; /* 0x1300 */
+ __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */
+ __u32 prefixreg_save_area; /* 0x1318 */
+ __u32 fpt_creg_save_area; /* 0x131c */
+ __u8 pad_0x1320[0x1324-0x1320]; /* 0x1320 */
+ __u32 tod_progreg_save_area; /* 0x1324 */
+ __u32 cpu_timer_save_area[2]; /* 0x1328 */
+ __u32 clock_comp_save_area[2]; /* 0x1330 */
+ __u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */
+ __u32 access_regs_save_area[16]; /* 0x1340 */
+ __u64 cregs_save_area[16]; /* 0x1380 */
+ __u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
+
+ /* Transaction abort diagnostic block */
+ __u8 pgm_tdb[256]; /* 0x1800 */
+ __u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
+
+ /* Software defined save area for vector registers */
+ __u8 vector_save_area[1024]; /* 0x1c00 */
+} __packed;
+
+#define S390_lowcore (*((struct _lowcore *) 0))
+
+extern struct _lowcore *lowcore_ptr[];
+
+static inline void set_prefix(__u32 address)
+{
+ asm volatile("spx %0" : : "m" (address) : "memory");
+}
+
+static inline __u32 store_prefix(void)
+{
+ __u32 address;
+
+ asm volatile("stpx %0" : "=m" (address));
+ return address;
+}
+
+#endif /* _ASM_S390_LOWCORE_H */
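Because S390_lowcore maps struct _lowcore at absolute address zero, field accesses go through the current CPU's prefix page. A short sketch:

    unsigned int cpu = S390_lowcore.cpu_nr; /* per-cpu data from the lowcore */
    __u32 prefix = store_prefix();          /* stpx: current prefix register */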
diff --git a/kernel/arch/s390/include/asm/mathemu.h b/kernel/arch/s390/include/asm/mathemu.h
new file mode 100644
index 000000000..614dfaf47
--- /dev/null
+++ b/kernel/arch/s390/include/asm/mathemu.h
@@ -0,0 +1,28 @@
+/*
+ * IEEE floating point emulation.
+ *
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#ifndef __MATHEMU__
+#define __MATHEMU__
+
+extern int math_emu_b3(__u8 *, struct pt_regs *);
+extern int math_emu_ed(__u8 *, struct pt_regs *);
+extern int math_emu_ldr(__u8 *);
+extern int math_emu_ler(__u8 *);
+extern int math_emu_std(__u8 *, struct pt_regs *);
+extern int math_emu_ld(__u8 *, struct pt_regs *);
+extern int math_emu_ste(__u8 *, struct pt_regs *);
+extern int math_emu_le(__u8 *, struct pt_regs *);
+extern int math_emu_lfpc(__u8 *, struct pt_regs *);
+extern int math_emu_stfpc(__u8 *, struct pt_regs *);
+extern int math_emu_srnm(__u8 *, struct pt_regs *);
+
+#endif /* __MATHEMU__ */
+
+
+
+
diff --git a/kernel/arch/s390/include/asm/mman.h b/kernel/arch/s390/include/asm/mman.h
new file mode 100644
index 000000000..b55a59e1d
--- /dev/null
+++ b/kernel/arch/s390/include/asm/mman.h
@@ -0,0 +1,15 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/mman.h"
+ */
+#ifndef __S390_MMAN_H__
+#define __S390_MMAN_H__
+
+#include <uapi/asm/mman.h>
+
+#ifndef __ASSEMBLY__
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
+#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
+#endif
+#endif /* __S390_MMAN_H__ */
diff --git a/kernel/arch/s390/include/asm/mmu.h b/kernel/arch/s390/include/asm/mmu.h
new file mode 100644
index 000000000..d29ad9545
--- /dev/null
+++ b/kernel/arch/s390/include/asm/mmu.h
@@ -0,0 +1,44 @@
+#ifndef __MMU_H
+#define __MMU_H
+
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+
+typedef struct {
+ cpumask_t cpu_attach_mask;
+ atomic_t attach_count;
+ unsigned int flush_mm;
+ spinlock_t list_lock;
+ struct list_head pgtable_list;
+ struct list_head gmap_list;
+ unsigned long asce_bits;
+ unsigned long asce_limit;
+ unsigned long vdso_base;
+ /* The mmu context allocates 4K page tables. */
+ unsigned int alloc_pgste:1;
+ /* The mmu context uses extended page tables. */
+ unsigned int has_pgste:1;
+ /* The mmu context uses storage keys. */
+ unsigned int use_skey:1;
+} mm_context_t;
+
+#define INIT_MM_CONTEXT(name) \
+ .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
+ .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+ .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
+
+static inline int tprot(unsigned long addr)
+{
+ int rc = -EFAULT;
+
+ asm volatile(
+ " tprot 0(%1),0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "a" (addr) : "cc");
+ return rc;
+}
+
+#endif
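tprot() returns the TPROT condition code, or -EFAULT via the exception table when the address does not translate at all. A sketch of interpreting the result:

    switch (tprot(addr)) {
    case 0:         /* fetch and store permitted */
    case 1:         /* fetch permitted, store protected */
            break;
    default:        /* 2: fully protected, or -EFAULT on exception */
            return -EFAULT;
    }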
diff --git a/kernel/arch/s390/include/asm/mmu_context.h b/kernel/arch/s390/include/asm/mmu_context.h
new file mode 100644
index 000000000..fb1b93ea3
--- /dev/null
+++ b/kernel/arch/s390/include/asm/mmu_context.h
@@ -0,0 +1,133 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/mmu_context.h"
+ */
+
+#ifndef __S390_MMU_CONTEXT_H
+#define __S390_MMU_CONTEXT_H
+
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlbflush.h>
+#include <asm/ctl_reg.h>
+
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ cpumask_clear(&mm->context.cpu_attach_mask);
+ atomic_set(&mm->context.attach_count, 0);
+ mm->context.flush_mm = 0;
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+ mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+#ifdef CONFIG_PGSTE
+ mm->context.alloc_pgste = page_table_allocate_pgste;
+ mm->context.has_pgste = 0;
+ mm->context.use_skey = 0;
+#endif
+ mm->context.asce_limit = STACK_TOP_MAX;
+ crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+ return 0;
+}
+
+#define destroy_context(mm) do { } while (0)
+
+static inline void set_user_asce(struct mm_struct *mm)
+{
+ S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+ if (current->thread.mm_segment.ar4)
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+ set_cpu_flag(CIF_ASCE);
+}
+
+static inline void clear_user_asce(void)
+{
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+}
+
+static inline void load_kernel_asce(void)
+{
+ unsigned long asce;
+
+ __ctl_store(asce, 1, 1);
+ if (asce != S390_lowcore.kernel_asce)
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_cpu_flag(CIF_ASCE);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ int cpu = smp_processor_id();
+
+ S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+ if (prev == next)
+ return;
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+ /* Clear old ASCE by loading the kernel ASCE. */
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ atomic_inc(&next->context.attach_count);
+ atomic_dec(&prev->context.attach_count);
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+}
+
+#define finish_arch_post_lock_switch finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+
+ load_kernel_asce();
+ if (mm) {
+ preempt_disable();
+ while (atomic_read(&mm->context.attach_count) >> 16)
+ cpu_relax();
+
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ if (mm->context.flush_mm)
+ __tlb_flush_mm(mm);
+ preempt_enable();
+ }
+ set_fs(current->thread.mm_segment);
+}
+
+#define enter_lazy_tlb(mm,tsk) do { } while (0)
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ switch_mm(prev, next, current);
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ set_user_asce(next);
+}
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ if (oldmm->context.asce_limit < mm->context.asce_limit)
+ crst_table_downgrade(mm, oldmm->context.asce_limit);
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ struct vm_area_struct *vma)
+{
+}
+
+#endif /* __S390_MMU_CONTEXT_H */
diff --git a/kernel/arch/s390/include/asm/module.h b/kernel/arch/s390/include/asm/module.h
new file mode 100644
index 000000000..df1f861a8
--- /dev/null
+++ b/kernel/arch/s390/include/asm/module.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_S390_MODULE_H
+#define _ASM_S390_MODULE_H
+
+#include <asm-generic/module.h>
+
+/*
+ * This file contains the s390 architecture specific module code.
+ */
+
+struct mod_arch_syminfo
+{
+ unsigned long got_offset;
+ unsigned long plt_offset;
+ int got_initialized;
+ int plt_initialized;
+};
+
+struct mod_arch_specific
+{
+ /* Starting offset of got in the module core memory. */
+ unsigned long got_offset;
+ /* Starting offset of plt in the module core memory. */
+ unsigned long plt_offset;
+ /* Size of the got. */
+ unsigned long got_size;
+ /* Size of the plt. */
+ unsigned long plt_size;
+ /* Number of symbols in syminfo. */
+ int nsyms;
+ /* Additional symbol information (got and plt offsets). */
+ struct mod_arch_syminfo *syminfo;
+};
+
+#endif /* _ASM_S390_MODULE_H */
diff --git a/kernel/arch/s390/include/asm/mutex.h b/kernel/arch/s390/include/asm/mutex.h
new file mode 100644
index 000000000..458c1f7fb
--- /dev/null
+++ b/kernel/arch/s390/include/asm/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/kernel/arch/s390/include/asm/nmi.h b/kernel/arch/s390/include/asm/nmi.h
new file mode 100644
index 000000000..3027a5a72
--- /dev/null
+++ b/kernel/arch/s390/include/asm/nmi.h
@@ -0,0 +1,66 @@
+/*
+ * Machine check handler definitions
+ *
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Cornelia Huck <cornelia.huck@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#ifndef _ASM_S390_NMI_H
+#define _ASM_S390_NMI_H
+
+#include <linux/types.h>
+
+struct mci {
+ __u32 sd : 1; /* 00 system damage */
+ __u32 pd : 1; /* 01 instruction-processing damage */
+ __u32 sr : 1; /* 02 system recovery */
+ __u32 : 1; /* 03 */
+ __u32 cd : 1; /* 04 timing-facility damage */
+ __u32 ed : 1; /* 05 external damage */
+ __u32 : 1; /* 06 */
+ __u32 dg : 1; /* 07 degradation */
+ __u32 w : 1; /* 08 warning pending */
+ __u32 cp : 1; /* 09 channel-report pending */
+ __u32 sp : 1; /* 10 service-processor damage */
+ __u32 ck : 1; /* 11 channel-subsystem damage */
+ __u32 : 2; /* 12-13 */
+ __u32 b : 1; /* 14 backed up */
+ __u32 : 1; /* 15 */
+ __u32 se : 1; /* 16 storage error uncorrected */
+ __u32 sc : 1; /* 17 storage error corrected */
+ __u32 ke : 1; /* 18 storage-key error uncorrected */
+ __u32 ds : 1; /* 19 storage degradation */
+ __u32 wp : 1; /* 20 psw mwp validity */
+ __u32 ms : 1; /* 21 psw mask and key validity */
+ __u32 pm : 1; /* 22 psw program mask and cc validity */
+ __u32 ia : 1; /* 23 psw instruction address validity */
+ __u32 fa : 1; /* 24 failing storage address validity */
+ __u32 vr : 1; /* 25 vector register validity */
+ __u32 ec : 1; /* 26 external damage code validity */
+ __u32 fp : 1; /* 27 floating point register validity */
+ __u32 gr : 1; /* 28 general register validity */
+ __u32 cr : 1; /* 29 control register validity */
+ __u32 : 1; /* 30 */
+ __u32 st : 1; /* 31 storage logical validity */
+ __u32 ie : 1; /* 32 indirect storage error */
+ __u32 ar : 1; /* 33 access register validity */
+ __u32 da : 1; /* 34 delayed access exception */
+ __u32 : 7; /* 35-41 */
+ __u32 pr : 1; /* 42 tod programmable register validity */
+ __u32 fc : 1; /* 43 fp control register validity */
+ __u32 ap : 1; /* 44 ancillary report */
+ __u32 : 1; /* 45 */
+ __u32 ct : 1; /* 46 cpu timer validity */
+ __u32 cc : 1; /* 47 clock comparator validity */
+	__u32	: 16;	/* 48-63 */
+};
+
+struct pt_regs;
+
+extern void s390_handle_mcck(void);
+extern void s390_do_machine_check(struct pt_regs *regs);
+
+#endif /* _ASM_S390_NMI_H */
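The machine-check handler decodes the interruption code that hardware stores in the lowcore into this bit layout. A sketch of the typical decode step (the messages are illustrative):

    struct mci *mci = (struct mci *) &S390_lowcore.mcck_interruption_code;

    if (mci->sd)            /* system damage is never recoverable */
            panic("machine check: system damage");
    if (mci->fa)            /* failing storage address field is valid */
            pr_err("failing storage address: %016llx\n",
                   S390_lowcore.failing_storage_address);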
diff --git a/kernel/arch/s390/include/asm/os_info.h b/kernel/arch/s390/include/asm/os_info.h
new file mode 100644
index 000000000..295f2c4f1
--- /dev/null
+++ b/kernel/arch/s390/include/asm/os_info.h
@@ -0,0 +1,49 @@
+/*
+ * OS info memory interface
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+#ifndef _ASM_S390_OS_INFO_H
+#define _ASM_S390_OS_INFO_H
+
+#define OS_INFO_VERSION_MAJOR 1
+#define OS_INFO_VERSION_MINOR 1
+#define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */
+
+#define OS_INFO_VMCOREINFO 0
+#define OS_INFO_REIPL_BLOCK 1
+
+struct os_info_entry {
+ u64 addr;
+ u64 size;
+ u32 csum;
+} __packed;
+
+struct os_info {
+ u64 magic;
+ u32 csum;
+ u16 version_major;
+ u16 version_minor;
+ u64 crashkernel_addr;
+ u64 crashkernel_size;
+ struct os_info_entry entry[2];
+ u8 reserved[4024];
+} __packed;
+
+void os_info_init(void);
+void os_info_entry_add(int nr, void *ptr, u64 len);
+void os_info_crashkernel_add(unsigned long base, unsigned long size);
+u32 os_info_csum(struct os_info *os_info);
+
+#ifdef CONFIG_CRASH_DUMP
+void *os_info_old_entry(int nr, unsigned long *size);
+int copy_from_oldmem(void *dest, void *src, size_t count);
+#else
+static inline void *os_info_old_entry(int nr, unsigned long *size)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _ASM_S390_OS_INFO_H */
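A sketch of the producer side, assuming buf/len come from the caller: the running kernel publishes a checksummed entry so dump tools and the next kernel can retrieve it after a crash:

    os_info_init();                                  /* set magic, version, checksum */
    os_info_entry_add(OS_INFO_VMCOREINFO, buf, len); /* publish entry 0 */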
diff --git a/kernel/arch/s390/include/asm/page.h b/kernel/arch/s390/include/asm/page.h
new file mode 100644
index 000000000..53eacbd4f
--- /dev/null
+++ b/kernel/arch/s390/include/asm/page.h
@@ -0,0 +1,156 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ */
+
+#ifndef _S390_PAGE_H
+#define _S390_PAGE_H
+
+#include <linux/const.h>
+#include <asm/types.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_DEFAULT_ACC 0
+#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
+
+#define HPAGE_SHIFT 20
+#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGE_PTE_TYPE
+#define ARCH_HAS_PREPARE_HUGEPAGE
+#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
+
+#include <asm/setup.h>
+#ifndef __ASSEMBLY__
+
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+#if PAGE_DEFAULT_KEY
+ __storage_key_init_range(start, end);
+#endif
+}
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+
+/*
+ * copy_page uses the mvcl instruction with a 0xb0 padding byte in order to
+ * bypass caches when copying a page. Especially when copying huge pages
+ * this keeps L1 and L2 data caches alive.
+ */
+static inline void copy_page(void *to, void *from)
+{
+ register void *reg2 asm ("2") = to;
+ register unsigned long reg3 asm ("3") = 0x1000;
+ register void *reg4 asm ("4") = from;
+ register unsigned long reg5 asm ("5") = 0xb0001000;
+ asm volatile(
+ " mvcl 2,4"
+ : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
+ : : "memory", "cc");
+}
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long pgste; } pgste_t;
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef pte_t *pgtable_t;
+
+#define pgprot_val(x) ((x).pgprot)
+#define pgste_val(x) ((x).pgste)
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pud_val(x) ((x).pud)
+#define pgd_val(x) ((x).pgd)
+
+#define __pgste(x) ((pgste_t) { (x) } )
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pud(x) ((pud_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+static inline void page_set_storage_key(unsigned long addr,
+ unsigned char skey, int mapped)
+{
+ if (!mapped)
+ asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
+ : : "d" (skey), "a" (addr));
+ else
+ asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+}
+
+static inline unsigned char page_get_storage_key(unsigned long addr)
+{
+ unsigned char skey;
+
+ asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
+ return skey;
+}
+
+static inline int page_reset_referenced(unsigned long addr)
+{
+ unsigned int ipm;
+
+ asm volatile(
+ " rrbe 0,%1\n"
+ " ipm %0\n"
+ : "=d" (ipm) : "a" (addr) : "cc");
+ return !!(ipm & 0x20000000);
+}
+
+/* Bits in the storage key */
+#define _PAGE_CHANGED 0x02 /* HW changed bit */
+#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
+#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
+#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
+
+struct page;
+void arch_free_page(struct page *page, int order);
+void arch_alloc_page(struct page *page, int order);
+void arch_set_page_states(int make_stable);
+
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+ return 0;
+}
+
+#define HAVE_ARCH_FREE_PAGE
+#define HAVE_ARCH_ALLOC_PAGE
+
+#endif /* !__ASSEMBLY__ */
+
+#define __PAGE_OFFSET 0x0UL
+#define PAGE_OFFSET 0x0UL
+#define __pa(x) (unsigned long)(x)
+#define __va(x) (void *)(unsigned long)(x)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _S390_PAGE_H */
diff --git a/kernel/arch/s390/include/asm/pci.h b/kernel/arch/s390/include/asm/pci.h
new file mode 100644
index 000000000..a648338c4
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pci.h
@@ -0,0 +1,191 @@
+#ifndef __ASM_S390_PCI_H
+#define __ASM_S390_PCI_H
+
+/* must be set before including asm-generic/pci.h */
+#define PCI_DMA_BUS_IS_PHYS (0)
+/* must be set before including pci_clp.h */
+#define PCI_BAR_COUNT 6
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <asm-generic/pci.h>
+#include <asm-generic/pci-dma-compat.h>
+#include <asm/pci_clp.h>
+#include <asm/pci_debug.h>
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+#define pcibios_assign_all_busses() (0)
+
+void __iomem *pci_iomap(struct pci_dev *, int, unsigned long);
+void pci_iounmap(struct pci_dev *, void __iomem *);
+int pci_domain_nr(struct pci_bus *);
+int pci_proc_domain(struct pci_bus *);
+
+#define ZPCI_BUS_NR 0 /* default bus number */
+#define ZPCI_DEVFN 0 /* default device number */
+
+/* PCI Function Controls */
+#define ZPCI_FC_FN_ENABLED 0x80
+#define ZPCI_FC_ERROR 0x40
+#define ZPCI_FC_BLOCKED 0x20
+#define ZPCI_FC_DMA_ENABLED 0x10
+
+struct zpci_fmb {
+ u32 format : 8;
+ u32 dma_valid : 1;
+ u32 : 23;
+ u32 samples;
+ u64 last_update;
+ /* hardware counters */
+ u64 ld_ops;
+ u64 st_ops;
+ u64 stb_ops;
+ u64 rpcit_ops;
+ u64 dma_rbytes;
+ u64 dma_wbytes;
+} __packed __aligned(16);
+
+enum zpci_state {
+ ZPCI_FN_STATE_RESERVED,
+ ZPCI_FN_STATE_STANDBY,
+ ZPCI_FN_STATE_CONFIGURED,
+ ZPCI_FN_STATE_ONLINE,
+ NR_ZPCI_FN_STATES,
+};
+
+struct zpci_bar_struct {
+ struct resource *res; /* bus resource */
+ u32 val; /* bar start & 3 flag bits */
+ u16 map_idx; /* index into bar mapping array */
+ u8 size; /* order 2 exponent */
+};
+
+/* Private data per function */
+struct zpci_dev {
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+ struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
+
+ enum zpci_state state;
+ u32 fid; /* function ID, used by sclp */
+ u32 fh; /* function handle, used by insns */
+ u16 vfn; /* virtual function number */
+ u16 pchid; /* physical channel ID */
+ u8 pfgid; /* function group ID */
+ u8 pft; /* pci function type */
+ u16 domain;
+
+ struct mutex lock;
+ u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
+ u32 uid; /* user defined id */
+ u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
+
+ /* IRQ stuff */
+ u64 msi_addr; /* MSI address */
+ unsigned int max_msi; /* maximum number of MSIs */
+ struct airq_iv *aibv; /* adapter interrupt bit vector */
+ unsigned int aisb; /* number of the summary bit */
+
+ /* DMA stuff */
+ unsigned long *dma_table;
+ spinlock_t dma_table_lock;
+ int tlb_refresh;
+
+ spinlock_t iommu_bitmap_lock;
+ unsigned long *iommu_bitmap;
+ unsigned long iommu_size;
+ unsigned long iommu_pages;
+ unsigned int next_bit;
+
+ char res_name[16];
+ struct zpci_bar_struct bars[PCI_BAR_COUNT];
+
+ u64 start_dma; /* Start of available DMA addresses */
+ u64 end_dma; /* End of available DMA addresses */
+ u64 dma_mask; /* DMA address space mask */
+
+ /* Function measurement block */
+ struct zpci_fmb *fmb;
+ u16 fmb_update; /* update interval */
+ /* software counters */
+ atomic64_t allocated_pages;
+ atomic64_t mapped_pages;
+ atomic64_t unmapped_pages;
+
+ enum pci_bus_speed max_bus_speed;
+
+ struct dentry *debugfs_dev;
+ struct dentry *debugfs_perf;
+};
+
+static inline bool zdev_enabled(struct zpci_dev *zdev)
+{
+ return (zdev->fh & (1UL << 31)) ? true : false;
+}
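
Bit 31 of the function handle is the enable bit, so (handle values purely illustrative):

/*
 * zdev->fh = 0x8000001a  ->  zdev_enabled(zdev) == true   (bit 31 set)
 * zdev->fh = 0x0000001a  ->  zdev_enabled(zdev) == false
 */
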
+
+extern const struct attribute_group *zpci_attr_groups[];
+
+/* -----------------------------------------------------------------------------
+ Prototypes
+----------------------------------------------------------------------------- */
+/* Base stuff */
+int zpci_create_device(struct zpci_dev *);
+int zpci_enable_device(struct zpci_dev *);
+int zpci_disable_device(struct zpci_dev *);
+void zpci_stop_device(struct zpci_dev *);
+int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
+int zpci_unregister_ioat(struct zpci_dev *, u8);
+
+/* CLP */
+int clp_scan_pci_devices(void);
+int clp_rescan_pci_devices(void);
+int clp_rescan_pci_devices_simple(void);
+int clp_add_pci_device(u32, u32, int);
+int clp_enable_fh(struct zpci_dev *, u8);
+int clp_disable_fh(struct zpci_dev *);
+
+#ifdef CONFIG_PCI
+/* Error handling and recovery */
+void zpci_event_error(void *);
+void zpci_event_availability(void *);
+void zpci_rescan(void);
+bool zpci_is_enabled(void);
+#else /* CONFIG_PCI */
+static inline void zpci_event_error(void *e) {}
+static inline void zpci_event_availability(void *e) {}
+static inline void zpci_rescan(void) {}
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_HOTPLUG_PCI_S390
+int zpci_init_slot(struct zpci_dev *);
+void zpci_exit_slot(struct zpci_dev *);
+#else /* CONFIG_HOTPLUG_PCI_S390 */
+static inline int zpci_init_slot(struct zpci_dev *zdev)
+{
+ return 0;
+}
+static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
+#endif /* CONFIG_HOTPLUG_PCI_S390 */
+
+/* Helpers */
+struct zpci_dev *get_zdev(struct pci_dev *);
+struct zpci_dev *get_zdev_by_fid(u32);
+
+/* DMA */
+int zpci_dma_init(void);
+void zpci_dma_exit(void);
+
+/* FMB */
+int zpci_fmb_enable_device(struct zpci_dev *);
+int zpci_fmb_disable_device(struct zpci_dev *);
+
+/* Debug */
+int zpci_debug_init(void);
+void zpci_debug_exit(void);
+void zpci_debug_init_device(struct zpci_dev *);
+void zpci_debug_exit_device(struct zpci_dev *);
+void zpci_debug_info(struct zpci_dev *, struct seq_file *);
+
+#endif
diff --git a/kernel/arch/s390/include/asm/pci_clp.h b/kernel/arch/s390/include/asm/pci_clp.h
new file mode 100644
index 000000000..dd78f92f1
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pci_clp.h
@@ -0,0 +1,186 @@
+#ifndef _ASM_S390_PCI_CLP_H
+#define _ASM_S390_PCI_CLP_H
+
+#include <asm/clp.h>
+
+/*
+ * Call Logical Processor - Command Codes
+ */
+#define CLP_LIST_PCI 0x0002
+#define CLP_QUERY_PCI_FN 0x0003
+#define CLP_QUERY_PCI_FNGRP 0x0004
+#define CLP_SET_PCI_FN 0x0005
+
+/* PCI function handle list entry */
+struct clp_fh_list_entry {
+ u16 device_id;
+ u16 vendor_id;
+ u32 config_state : 1;
+ u32 : 31;
+ u32 fid; /* PCI function id */
+ u32 fh; /* PCI function handle */
+} __packed;
+
+#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
+#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
+#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
+#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
+#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
+#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
+#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
+#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
+#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
+#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
+
+/* request or response block header length */
+#define LIST_PCI_HDR_LEN 32
+
+/* Number of function handles fitting in response block */
+#define CLP_FH_LIST_NR_ENTRIES \
+ ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
+ / sizeof(struct clp_fh_list_entry))
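
As a worked example, assuming CLP_BLK_SIZE from <asm/clp.h> is the usual 4KB command block (an assumption; the value is not shown in this patch):

/*
 * sizeof(struct clp_fh_list_entry) = 2 + 2 + 4 + 4 + 4 = 16 bytes
 * CLP_FH_LIST_NR_ENTRIES = (4096 - 2 * 32) / 16 = 252
 * i.e. one response block can carry up to 252 function handles.
 */
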
+
+#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
+#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+
+#define CLP_UTIL_STR_LEN 64
+#define CLP_PFIP_NR_SEGMENTS 4
+
+/* List PCI functions request */
+struct clp_req_list_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u64 resume_token;
+ u64 reserved2;
+} __packed;
+
+/* List PCI functions response */
+struct clp_rsp_list_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u64 resume_token;
+ u32 reserved2;
+ u16 max_fn;
+ u8 reserved3;
+ u8 entry_size;
+ struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
+} __packed;
+
+/* Query PCI function request */
+struct clp_req_query_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u32 reserved2;
+ u64 reserved3;
+} __packed;
+
+/* Query PCI function response */
+struct clp_rsp_query_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 : 64;
+ u16 vfn; /* virtual fn number */
+ u16 : 7;
+ u16 util_str_avail : 1; /* utility string available? */
+ u16 pfgid : 8; /* pci function group id */
+ u32 fid; /* pci function id */
+ u8 bar_size[PCI_BAR_COUNT];
+ u16 pchid;
+ u32 bar[PCI_BAR_COUNT];
+ u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
+ u32 : 24;
+ u8 pft; /* pci function type */
+ u64 sdma; /* start dma as */
+ u64 edma; /* end dma as */
+ u32 reserved[11];
+ u32 uid; /* user defined id */
+ u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
+} __packed;
+
+/* Query PCI function group request */
+struct clp_req_query_pci_grp {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 : 24;
+ u32 pfgid : 8; /* function group id */
+ u32 reserved2;
+ u64 reserved3;
+} __packed;
+
+/* Query PCI function group response */
+struct clp_rsp_query_pci_grp {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u16 : 4;
+ u16 noi : 12; /* number of interrupts */
+ u8 version;
+ u8 : 6;
+ u8 frame : 1;
+ u8 refresh : 1; /* TLB refresh mode */
+ u16 reserved2;
+ u16 mui;
+ u64 reserved3;
+ u64 dasm; /* dma address space mask */
+ u64 msia; /* MSI address */
+ u64 reserved4;
+ u64 reserved5;
+} __packed;
+
+/* Set PCI function request */
+struct clp_req_set_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u16 reserved2;
+ u8 oc; /* operation controls */
+ u8 ndas; /* number of dma spaces */
+ u64 reserved3;
+} __packed;
+
+/* Set PCI function response */
+struct clp_rsp_set_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u32 reserved3;
+ u64 reserved4;
+} __packed;
+
+/* Combined request/response block structures used by clp insn */
+struct clp_req_rsp_list_pci {
+ struct clp_req_list_pci request;
+ struct clp_rsp_list_pci response;
+} __packed;
+
+struct clp_req_rsp_set_pci {
+ struct clp_req_set_pci request;
+ struct clp_rsp_set_pci response;
+} __packed;
+
+struct clp_req_rsp_query_pci {
+ struct clp_req_query_pci request;
+ struct clp_rsp_query_pci response;
+} __packed;
+
+struct clp_req_rsp_query_pci_grp {
+ struct clp_req_query_pci_grp request;
+ struct clp_rsp_query_pci_grp response;
+} __packed;
+
+#endif
diff --git a/kernel/arch/s390/include/asm/pci_debug.h b/kernel/arch/s390/include/asm/pci_debug.h
new file mode 100644
index 000000000..ac24b26fc
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pci_debug.h
@@ -0,0 +1,28 @@
+#ifndef _S390_ASM_PCI_DEBUG_H
+#define _S390_ASM_PCI_DEBUG_H
+
+#include <asm/debug.h>
+
+extern debug_info_t *pci_debug_msg_id;
+extern debug_info_t *pci_debug_err_id;
+
+#define zpci_dbg(imp, fmt, args...) \
+ debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
+
+#define zpci_err(text...) \
+ do { \
+ char debug_buffer[16]; \
+ snprintf(debug_buffer, 16, text); \
+ debug_text_event(pci_debug_err_id, 0, debug_buffer); \
+ } while (0)
+
+static inline void zpci_err_hex(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(pci_debug_err_id, 0, (void *) addr, len);
+ len -= pci_debug_err_id->buf_size;
+ addr += pci_debug_err_id->buf_size;
+ }
+}
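
A short usage sketch of the two logging helpers above (hypothetical function and message text, not part of the patch):

static void example_log_clp_failure(u32 fid, void *rsp, int len)
{
	/* formatted text is truncated to the 16 byte debug_buffer */
	zpci_err("CLP fid:%x\n", fid);
	/* the raw response block is dumped in buf_size sized chunks */
	zpci_err_hex(rsp, len);
}
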
+
+#endif
diff --git a/kernel/arch/s390/include/asm/pci_dma.h b/kernel/arch/s390/include/asm/pci_dma.h
new file mode 100644
index 000000000..30b4c179c
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pci_dma.h
@@ -0,0 +1,196 @@
+#ifndef _ASM_S390_PCI_DMA_H
+#define _ASM_S390_PCI_DMA_H
+
+/* I/O Translation Anchor (IOTA) */
+enum zpci_ioat_dtype {
+ ZPCI_IOTA_STO = 0,
+ ZPCI_IOTA_RTTO = 1,
+ ZPCI_IOTA_RSTO = 2,
+ ZPCI_IOTA_RFTO = 3,
+ ZPCI_IOTA_PFAA = 4,
+ ZPCI_IOTA_IOPFAA = 5,
+ ZPCI_IOTA_IOPTO = 7
+};
+
+#define ZPCI_IOTA_IOT_ENABLED 0x800UL
+#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
+#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
+#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
+#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
+#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
+#define ZPCI_IOTA_FS_4K 0
+#define ZPCI_IOTA_FS_1M 1
+#define ZPCI_IOTA_FS_2G 2
+#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+
+#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
+#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
+#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
+#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
+#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
+
+/* I/O Region and segment tables */
+#define ZPCI_INDEX_MASK 0x7ffUL
+
+#define ZPCI_TABLE_TYPE_MASK 0xc
+#define ZPCI_TABLE_TYPE_RFX 0xc
+#define ZPCI_TABLE_TYPE_RSX 0x8
+#define ZPCI_TABLE_TYPE_RTX 0x4
+#define ZPCI_TABLE_TYPE_SX 0x0
+
+#define ZPCI_TABLE_LEN_RFX 0x3
+#define ZPCI_TABLE_LEN_RSX 0x3
+#define ZPCI_TABLE_LEN_RTX 0x3
+
+#define ZPCI_TABLE_OFFSET_MASK 0xc0
+#define ZPCI_TABLE_SIZE 0x4000
+#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
+#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
+#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+
+#define ZPCI_TABLE_BITS 11
+#define ZPCI_PT_BITS 8
+#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
+#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
+
+#define ZPCI_RTE_FLAG_MASK 0x3fffUL
+#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
+#define ZPCI_STE_FLAG_MASK 0x7ffUL
+#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
+
+/* I/O Page tables */
+#define ZPCI_PTE_VALID_MASK 0x400
+#define ZPCI_PTE_INVALID 0x400
+#define ZPCI_PTE_VALID 0x000
+#define ZPCI_PT_SIZE 0x800
+#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
+#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
+
+#define ZPCI_PTE_FLAG_MASK 0xfffUL
+#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
+
+/* Shared bits */
+#define ZPCI_TABLE_VALID 0x00
+#define ZPCI_TABLE_INVALID 0x20
+#define ZPCI_TABLE_PROTECTED 0x200
+#define ZPCI_TABLE_UNPROTECTED 0x000
+
+#define ZPCI_TABLE_VALID_MASK 0x20
+#define ZPCI_TABLE_PROT_MASK 0x200
+
+static inline unsigned int calc_rtx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_sx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_px(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
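
With PAGE_SHIFT = 12 this gives ZPCI_ST_SHIFT = 20 and ZPCI_RT_SHIFT = 31, so a DMA address splits into region-table, segment-table and page-table indices plus a byte offset; a worked example (address value illustrative):

/*
 * bits 41..31: region-table index, bits 30..20: segment-table index,
 * bits 19..12: page-table index,   bits 11..0:  byte offset
 *
 * For ptr = 0x12345678:
 *   calc_rtx(ptr) = (0x12345678 >> 31) & 0x7ff = 0
 *   calc_sx(ptr)  = (0x12345678 >> 20) & 0x7ff = 0x123
 *   calc_px(ptr)  = (0x12345678 >> 12) & 0xff  = 0x45
 */
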
+
+static inline void set_pt_pfaa(unsigned long *entry, void *pfaa)
+{
+ *entry &= ZPCI_PTE_FLAG_MASK;
+ *entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK);
+}
+
+static inline void set_rt_sto(unsigned long *entry, void *sto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RTX;
+}
+
+static inline void set_st_pto(unsigned long *entry, void *pto)
+{
+ *entry &= ZPCI_STE_FLAG_MASK;
+ *entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_SX;
+}
+
+static inline void validate_rt_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RTX;
+}
+
+static inline void validate_st_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+}
+
+static inline void invalidate_table_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_INVALID;
+}
+
+static inline void invalidate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_INVALID;
+}
+
+static inline void validate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_VALID;
+}
+
+static inline void entry_set_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_PROTECTED;
+}
+
+static inline void entry_clr_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_UNPROTECTED;
+}
+
+static inline int reg_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
+}
+
+static inline int pt_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
+}
+
+static inline int entry_isprotected(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
+}
+
+static inline unsigned long *get_rt_sto(unsigned long entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+ ? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK)
+ : NULL;
+}
+
+static inline unsigned long *get_st_pto(unsigned long entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+ ? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK)
+ : NULL;
+}
+
+/* Prototypes */
+int zpci_dma_init_device(struct zpci_dev *);
+void zpci_dma_exit_device(struct zpci_dev *);
+
+#endif
diff --git a/kernel/arch/s390/include/asm/pci_insn.h b/kernel/arch/s390/include/asm/pci_insn.h
new file mode 100644
index 000000000..649eb62c5
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pci_insn.h
@@ -0,0 +1,86 @@
+#ifndef _ASM_S390_PCI_INSN_H
+#define _ASM_S390_PCI_INSN_H
+
+/* Load/Store status codes */
+#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
+#define ZPCI_PCI_ST_FUNC_IN_ERR 8
+#define ZPCI_PCI_ST_BLOCKED 12
+#define ZPCI_PCI_ST_INSUF_RES 16
+#define ZPCI_PCI_ST_INVAL_AS 20
+#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
+#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
+#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
+#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
+#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
+
+/* Load/Store return codes */
+#define ZPCI_PCI_LS_OK 0
+#define ZPCI_PCI_LS_ERR 1
+#define ZPCI_PCI_LS_BUSY 2
+#define ZPCI_PCI_LS_INVAL_HANDLE 3
+
+/* Load/Store address space identifiers */
+#define ZPCI_PCIAS_MEMIO_0 0
+#define ZPCI_PCIAS_MEMIO_1 1
+#define ZPCI_PCIAS_MEMIO_2 2
+#define ZPCI_PCIAS_MEMIO_3 3
+#define ZPCI_PCIAS_MEMIO_4 4
+#define ZPCI_PCIAS_MEMIO_5 5
+#define ZPCI_PCIAS_CFGSPC 15
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT 2
+#define ZPCI_MOD_FC_DEREG_INT 3
+#define ZPCI_MOD_FC_REG_IOAT 4
+#define ZPCI_MOD_FC_DEREG_IOAT 5
+#define ZPCI_MOD_FC_REREG_IOAT 6
+#define ZPCI_MOD_FC_RESET_ERROR 7
+#define ZPCI_MOD_FC_RESET_BLOCK 9
+#define ZPCI_MOD_FC_SET_MEASURE 10
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* Function Information Block */
+struct zpci_fib {
+ u32 fmt : 8; /* format */
+ u32 : 24;
+ u32 : 32;
+ u8 fc; /* function controls */
+ u64 : 56;
+ u64 pba; /* PCI base address */
+ u64 pal; /* PCI address limit */
+ u64 iota; /* I/O Translation Anchor */
+ u32 : 1;
+ u32 isc : 3; /* Interrupt subclass */
+ u32 noi : 12; /* Number of interrupts */
+ u32 : 2;
+ u32 aibvo : 6; /* Adapter interrupt bit vector offset */
+ u32 sum : 1; /* Adapter int summary bit enabled */
+ u32 : 1;
+ u32 aisbo : 6; /* Adapter int summary bit offset */
+ u32 : 32;
+ u64 aibv; /* Adapter int bit vector address */
+ u64 aisb; /* Adapter int summary bit address */
+ u64 fmb_addr; /* Function measurement block address and key */
+ u32 : 32;
+ u32 gd;
+} __packed __aligned(8);
+
+int zpci_mod_fc(u64 req, struct zpci_fib *fib);
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+int zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+
+#endif
diff --git a/kernel/arch/s390/include/asm/pci_io.h b/kernel/arch/s390/include/asm/pci_io.h
new file mode 100644
index 000000000..1a9a98de5
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pci_io.h
@@ -0,0 +1,201 @@
+#ifndef _ASM_S390_PCI_IO_H
+#define _ASM_S390_PCI_IO_H
+
+#ifdef CONFIG_PCI
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/pci_insn.h>
+
+/* I/O Map */
+#define ZPCI_IOMAP_MAX_ENTRIES 0x7fff
+#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL
+#define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL
+#define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL
+
+struct zpci_iomap_entry {
+ u32 fh;
+ u8 bar;
+ u16 count;
+};
+
+extern struct zpci_iomap_entry *zpci_iomap_start;
+
+#define ZPCI_IDX(addr) \
+ (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
+#define ZPCI_OFFSET(addr) \
+ ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
+
+#define ZPCI_CREATE_REQ(handle, space, len) \
+ ((u64) handle << 32 | space << 16 | len)
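
The request word packs the function handle into bits 63-32, the address space into bits 31-16 and the length into the low bits; for example (handle value illustrative):

/*
 * ZPCI_CREATE_REQ(0x80000005, ZPCI_PCIAS_CFGSPC, 4)
 *   = (u64) 0x80000005 << 32 | 15 << 16 | 4
 *   = 0x80000005000f0004
 */
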
+
+#define zpci_read(LENGTH, RETTYPE) \
+static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
+{ \
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
+ u64 data; \
+ int rc; \
+ \
+ rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
+ if (rc) \
+ data = -1ULL; \
+ return (RETTYPE) data; \
+}
+
+#define zpci_write(LENGTH, VALTYPE) \
+static inline void zpci_write_##VALTYPE(VALTYPE val, \
+ const volatile void __iomem *addr) \
+{ \
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
+ u64 data = (VALTYPE) val; \
+ \
+ zpci_store(data, req, ZPCI_OFFSET(addr)); \
+}
+
+zpci_read(8, u64)
+zpci_read(4, u32)
+zpci_read(2, u16)
+zpci_read(1, u8)
+zpci_write(8, u64)
+zpci_write(4, u32)
+zpci_write(2, u16)
+zpci_write(1, u8)
+
+static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
+{
+ u64 val;
+
+ switch (len) {
+ case 1:
+ val = (u64) *((u8 *) data);
+ break;
+ case 2:
+ val = (u64) *((u16 *) data);
+ break;
+ case 4:
+ val = (u64) *((u32 *) data);
+ break;
+ case 8:
+ val = (u64) *((u64 *) data);
+ break;
+ default:
+ val = 0; /* let FW report error */
+ break;
+ }
+ return zpci_store(val, req, offset);
+}
+
+static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
+{
+ u64 data;
+ int cc;
+
+ cc = zpci_load(&data, req, offset);
+ if (cc)
+ goto out;
+
+ switch (len) {
+ case 1:
+ *((u8 *) dst) = (u8) data;
+ break;
+ case 2:
+ *((u16 *) dst) = (u16) data;
+ break;
+ case 4:
+ *((u32 *) dst) = (u32) data;
+ break;
+ case 8:
+ *((u64 *) dst) = (u64) data;
+ break;
+ }
+out:
+ return cc;
+}
+
+static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
+{
+ return zpci_store_block(data, req, offset);
+}
+
+static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+{
+ int count = len > max ? max : len, size = 1;
+
+ while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
+ dst = dst >> 1;
+ src = src >> 1;
+ size = size << 1;
+ }
+ return size;
+}
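
The helper returns the largest power-of-two chunk size, capped by max and len, that both addresses are aligned to; two worked examples:

/*
 * src = 0x1004, dst = 0x2008, len = 11, max = 8:
 *   count = 8; both addresses are 4-byte aligned, so size doubles
 *   1 -> 2 -> 4 and then stops on the odd shifted src => returns 4.
 *
 * src, dst 8-byte aligned, len = 3, max = 8:
 *   count = 3; size stops at 2 since 4 > count => returns 2.
 */
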
+
+static inline int zpci_memcpy_fromio(void *dst,
+ const volatile void __iomem *src,
+ unsigned long n)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
+ u64 req, offset = ZPCI_OFFSET(src);
+ int size, rc = 0;
+
+ while (n > 0) {
+ size = zpci_get_max_write_size((u64 __force) src,
+ (u64) dst, n, 8);
+ req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
+ rc = zpci_read_single(req, dst, offset, size);
+ if (rc)
+ break;
+ offset += size;
+ dst += size;
+ n -= size;
+ }
+ return rc;
+}
+
+static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+ const void *src, unsigned long n)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+ u64 req, offset = ZPCI_OFFSET(dst);
+ int size, rc = 0;
+
+ if (!src)
+ return -EINVAL;
+
+ while (n > 0) {
+ size = zpci_get_max_write_size((u64 __force) dst,
+ (u64) src, n, 128);
+ req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
+
+ if (size > 8) /* main path */
+ rc = zpci_write_block(req, src, offset);
+ else
+ rc = zpci_write_single(req, src, offset, size);
+ if (rc)
+ break;
+ offset += size;
+ src += size;
+ n -= size;
+ }
+ return rc;
+}
+
+static inline int zpci_memset_io(volatile void __iomem *dst,
+ unsigned char val, size_t count)
+{
+ u8 *src = kmalloc(count, GFP_KERNEL);
+ int rc;
+
+ if (src == NULL)
+ return -ENOMEM;
+ memset(src, val, count);
+
+ rc = zpci_memcpy_toio(dst, src, count);
+ kfree(src);
+ return rc;
+}
+
+#endif /* CONFIG_PCI */
+
+#endif /* _ASM_S390_PCI_IO_H */
diff --git a/kernel/arch/s390/include/asm/percpu.h b/kernel/arch/s390/include/asm/percpu.h
new file mode 100644
index 000000000..6d6556ca2
--- /dev/null
+++ b/kernel/arch/s390/include/asm/percpu.h
@@ -0,0 +1,186 @@
+#ifndef __ARCH_S390_PERCPU__
+#define __ARCH_S390_PERCPU__
+
+#include <linux/preempt.h>
+#include <asm/cmpxchg.h>
+
+/*
+ * s390 uses its own implementation for per cpu data; the offset of
+ * the cpu local data area is cached in the cpu's lowcore memory.
+ */
+#define __my_cpu_offset S390_lowcore.percpu_offset
+
+/*
+ * For 64 bit module code, the module may be more than 4G above the
+ * per cpu area, use weak definitions to force the compiler to
+ * generate external references.
+ */
+#if defined(CONFIG_SMP) && defined(MODULE)
+#define ARCH_NEEDS_WEAK_PER_CPU
+#endif
+
+/*
+ * We use a compare-and-swap loop since that uses fewer cpu cycles than
+ * disabling and enabling interrupts, as the generic variant would do.
+ */
+#define arch_this_cpu_to_op_simple(pcp, val, op) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ old__, new__, prev__; \
+ pcp_op_T__ *ptr__; \
+ preempt_disable(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ prev__ = *ptr__; \
+ do { \
+ old__ = prev__; \
+ new__ = old__ op (val); \
+ prev__ = cmpxchg(ptr__, old__, new__); \
+ } while (prev__ != old__); \
+ preempt_enable(); \
+ new__; \
+})
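
Conceptually the macro is a read-modify-write retry loop; a hedged plain-C rendering of what this_cpu_add_1(counter, 1) boils down to:

u8 *ptr, old, new, prev;

preempt_disable();
ptr = raw_cpu_ptr(&counter);
prev = *ptr;
do {
	old = prev;
	new = old + 1;
	/* retry if another update hit the slot in between */
	prev = cmpxchg(ptr, old, new);
} while (prev != old);
preempt_enable();
/* the macro's value is new */
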
+
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ if (__builtin_constant_p(val__) && \
+ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
+ asm volatile( \
+ op2 " %[ptr__],%[val__]\n" \
+ : [ptr__] "+Q" (*ptr__) \
+ : [val__] "i" ((szcast)val__) \
+ : "cc"); \
+ } else { \
+ asm volatile( \
+ op1 " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ } \
+ preempt_enable(); \
+}
+
+#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
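
The constant check selects the add-immediate forms: asi/agsi take a signed 8-bit immediate, hence the open interval (-129, 128); everything else uses the load-and-add register forms. Schematically:

/*
 * this_cpu_add_4(x, 1)     ->  "asi %[ptr],1"              (immediate)
 * this_cpu_add_4(x, 1000)  ->  "laa %[old],%[val],%[ptr]"  (register)
 */
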
+
+#define arch_this_cpu_add_return(pcp, val, op) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+ old__ + val__; \
+})
+
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
+
+#define arch_this_cpu_to_op(pcp, val, op) \
+{ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ val__ = (val); \
+ pcp_op_T__ old__, *ptr__; \
+ preempt_disable(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ asm volatile( \
+ op " %[old__],%[val__],%[ptr__]\n" \
+ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+ : [val__] "d" (val__) \
+ : "cc"); \
+ preempt_enable(); \
+}
+
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang")
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ pcp_op_T__ ret__; \
+ pcp_op_T__ *ptr__; \
+ preempt_disable(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ ret__ = cmpxchg(ptr__, oval, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+
+#define arch_this_cpu_xchg(pcp, nval) \
+({ \
+ typeof(pcp) *ptr__; \
+ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ ret__ = xchg(ptr__, nval); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
+
+#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
+({ \
+ typeof(pcp1) o1__ = (o1), n1__ = (n1); \
+ typeof(pcp2) o2__ = (o2), n2__ = (n2); \
+ typeof(pcp1) *p1__; \
+ typeof(pcp2) *p2__; \
+ int ret__; \
+ preempt_disable(); \
+ p1__ = raw_cpu_ptr(&(pcp1)); \
+ p2__ = raw_cpu_ptr(&(pcp2)); \
+ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
+#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ARCH_S390_PERCPU__ */
diff --git a/kernel/arch/s390/include/asm/perf_event.h b/kernel/arch/s390/include/asm/perf_event.h
new file mode 100644
index 000000000..4cb19fe76
--- /dev/null
+++ b/kernel/arch/s390/include/asm/perf_event.h
@@ -0,0 +1,93 @@
+/*
+ * Performance event support - s390 specific definitions.
+ *
+ * Copyright IBM Corp. 2009, 2013
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_S390_PERF_EVENT_H
+#define _ASM_S390_PERF_EVENT_H
+
+#include <linux/perf_event.h>
+#include <linux/device.h>
+#include <asm/cpu_mf.h>
+
+/* Per-CPU flags for PMU states */
+#define PMU_F_RESERVED 0x1000
+#define PMU_F_ENABLED 0x2000
+#define PMU_F_IN_USE 0x4000
+#define PMU_F_ERR_IBE 0x0100
+#define PMU_F_ERR_LSDA 0x0200
+#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
+
+/* Perf definitions for PMU event attributes in sysfs */
+extern __init const struct attribute_group **cpumf_cf_event_group(void);
+extern ssize_t cpumf_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page);
+#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name
+#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr)
+
+#define CPUMF_EVENT_ATTR(cat, name, id) \
+ PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show)
+#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name)
+
+
+/* Perf callbacks */
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+
+/* Perf pt_regs extension for sample-data-entry indicators */
+struct perf_sf_sde_regs {
+ unsigned char in_guest:1; /* guest sample */
+ unsigned long reserved:63; /* reserved */
+};
+
+/* Perf PMU definitions for the counter facility */
+#define PERF_CPUM_CF_MAX_CTR 256
+
+/* Perf PMU definitions for the sampling facility */
+#define PERF_CPUM_SF_MAX_CTR 2
+#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
+#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
+#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
+#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
+#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
+ PERF_CPUM_SF_DIAG_MODE)
+#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
+
+#define REG_NONE 0
+#define REG_OVERFLOW 1
+#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
+#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
+#define RAWSAMPLE_REG(hwc) ((hwc)->config)
+#define TEAR_REG(hwc) ((hwc)->last_tag)
+#define SAMPL_RATE(hwc) ((hwc)->event_base)
+#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
+#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
+#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
+
+/* Structure for sampling data entries to be passed as perf raw sample data
+ * to user space. Note that raw sample data must be aligned and, thus, might
+ * be padded with zeros.
+ */
+struct sf_raw_sample {
+#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE
+#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE
+ u64 format;
+ u32 size; /* Size of sf_raw_sample */
+ u16 bsdes; /* Basic-sampling data entry size */
+ u16 dsdes; /* Diagnostic-sampling data entry size */
+ struct hws_basic_entry basic; /* Basic-sampling data entry */
+ struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
+ u8 padding[]; /* Padding to next multiple of 8 */
+} __packed;
+
+/* Perf hardware reserve and release functions */
+int perf_reserve_sampling(void);
+void perf_release_sampling(void);
+
+#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/kernel/arch/s390/include/asm/pgalloc.h b/kernel/arch/s390/include/asm/pgalloc.h
new file mode 100644
index 000000000..7b7858f15
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pgalloc.h
@@ -0,0 +1,132 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/pgalloc.h"
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef _S390_PGALLOC_H
+#define _S390_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+unsigned long *crst_table_alloc(struct mm_struct *);
+void crst_table_free(struct mm_struct *, unsigned long *);
+
+unsigned long *page_table_alloc(struct mm_struct *);
+void page_table_free(struct mm_struct *, unsigned long *);
+void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
+extern int page_table_allocate_pgste;
+
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned long key, bool nq);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
+
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
+{
+ typedef struct { char _[n]; } addrtype;
+
+ *s = val;
+ n = (n / 256) - 1;
+ asm volatile(
+ " mvc 8(248,%0),0(%0)\n"
+ "0: mvc 256(256,%0),0(%0)\n"
+ " la %0,256(%0)\n"
+ " brct %1,0b\n"
+ : "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
+ : "m" (*(addrtype *) s));
+}
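
clear_table seeds the first eight bytes and lets mvc replicate them in 256-byte strides; a hedged C equivalent of the net effect (valid for the multiple-of-256 sizes the callers pass):

static inline void clear_table_c(unsigned long *s, unsigned long val, size_t n)
{
	size_t i;

	/* every 8-byte entry of the n-byte table is set to val */
	for (i = 0; i < n / sizeof(unsigned long); i++)
		s[i] = val;
}
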
+
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
+{
+ clear_table(crst, entry, sizeof(unsigned long)*2048);
+}
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+ if (mm->context.asce_limit <= (1UL << 31))
+ return _SEGMENT_ENTRY_EMPTY;
+ if (mm->context.asce_limit <= (1UL << 42))
+ return _REGION3_ENTRY_EMPTY;
+ return _REGION2_ENTRY_EMPTY;
+}
+
+int crst_table_upgrade(struct mm_struct *, unsigned long limit);
+void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ unsigned long *table = crst_table_alloc(mm);
+ if (table)
+ crst_table_init(table, _REGION3_ENTRY_EMPTY);
+ return (pud_t *) table;
+}
+#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *table = crst_table_alloc(mm);
+
+ if (!table)
+ return NULL;
+ crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+ if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+ crst_table_free(mm, table);
+ return NULL;
+ }
+ return (pmd_t *) table;
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ crst_table_free(mm, (unsigned long *) pmd);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ spin_lock_init(&mm->context.list_lock);
+ INIT_LIST_HEAD(&mm->context.pgtable_list);
+ INIT_LIST_HEAD(&mm->context.gmap_list);
+ return (pgd_t *) crst_table_alloc(mm);
+}
+#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
+
+static inline void pmd_populate(struct mm_struct *mm,
+ pmd_t *pmd, pgtable_t pte)
+{
+ pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+}
+
+#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
+
+#define pmd_pgtable(pmd) \
+ (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
+
+/*
+ * page table entry allocation/free routines.
+ */
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+
+#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
+#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
+
+extern void rcu_table_freelist_finish(void);
+
+#endif /* _S390_PGALLOC_H */
diff --git a/kernel/arch/s390/include/asm/pgtable.h b/kernel/arch/s390/include/asm/pgtable.h
new file mode 100644
index 000000000..ef24a212e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/pgtable.h
@@ -0,0 +1,1637 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ * Ulrich Weigand (weigand@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/pgtable.h"
+ */
+
+#ifndef _ASM_S390_PGTABLE_H
+#define _ASM_S390_PGTABLE_H
+
+/*
+ * The Linux memory management assumes a three-level page table setup.
+ * For s390 64 bit we use up to four of the five levels the hardware
+ * provides (region first tables are not used).
+ *
+ * The "pgd_xxx()" functions are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the S390 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
+#include <linux/radix-tree.h>
+#include <asm/bug.h>
+#include <asm/page.h>
+
+extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
+extern void paging_init(void);
+extern void vmem_map_init(void);
+
+/*
+ * The S390 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
+#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero; used
+ * for zero-mapped memory areas etc..
+ */
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+ (virt_to_page((void *)(empty_zero_page + \
+ (((unsigned long)(vaddr)) & zero_page_mask))))
+#define __HAVE_COLOR_ZERO_PAGE
+
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page
+ * table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PMD_SHIFT 20
+#define PUD_SHIFT 31
+#define PGDIR_SHIFT 42
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: the S390 is two-level, so
+ * we don't really have any PMD directory physically.
+ * For S390, segment-table entries are combined into one PGD,
+ * which leads to 1024 ptes per pgd.
+ */
+#define PTRS_PER_PTE 256
+#define PTRS_PER_PMD 2048
+#define PTRS_PER_PUD 2048
+#define PTRS_PER_PGD 2048
+
+#define FIRST_USER_ADDRESS 0UL
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
+
+#ifndef __ASSEMBLY__
+/*
+ * The vmalloc and module area will always be on the topmost area of the
+ * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
+ * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
+ * modules will reside. That makes sure that inter-module branches always
+ * happen without trampolines; in addition, the placement within a 2GB frame
+ * is friendly to the branch prediction unit.
+ */
+extern unsigned long VMALLOC_START;
+extern unsigned long VMALLOC_END;
+extern struct page *vmemmap;
+
+#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
+
+extern unsigned long MODULES_VADDR;
+extern unsigned long MODULES_END;
+#define MODULES_VADDR MODULES_VADDR
+#define MODULES_END MODULES_END
+#define MODULES_LEN (1UL << 31)
+
+static inline int is_module_addr(void *addr)
+{
+ BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+ if (addr < (void *)MODULES_VADDR)
+ return 0;
+ if (addr > (void *)MODULES_END)
+ return 0;
+ return 1;
+}
+
+/*
+ * A 64 bit page table entry of S390 has the following format:
+ * | PFRA |0IPC| OS |
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * I Page-Invalid Bit: Page is not available for address-translation
+ * P Page-Protection Bit: Store access not possible for page
+ * C Change-bit override: HW is not required to set change bit
+ *
+ * A 64 bit segment table entry of S390 has the following format:
+ * | P-table origin | TT
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * I Segment-Invalid Bit: Segment is not available for address-translation
+ * C Common-Segment Bit: Segment is not private (PoP 3-30)
+ * P Page-Protection Bit: Store access not possible for page
+ * TT Type 00
+ *
+ * A 64 bit region table entry of S390 has the following format:
+ * | S-table origin | TF TTTL
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * I Segment-Invalid Bit: Segment is not available for address-translation
+ * TT Type 01
+ * TF
+ * TL Table length
+ *
+ * The 64 bit region table origin of S390 has the following format:
+ * | region table origin | DTTL
+ * 0000000000111111111122222222223333333333444444444455555555556666
+ * 0123456789012345678901234567890123456789012345678901234567890123
+ *
+ * X Space-Switch event:
+ * G Segment-Invalid Bit:
+ * P Private-Space Bit:
+ * S Storage-Alteration:
+ * R Real space
+ * TL Table-Length:
+ *
+ * A storage key has the following format:
+ * | ACC |F|R|C|0|
+ * 0 3 4 5 6 7
+ * ACC: access key
+ * F : fetch protection bit
+ * R : referenced bit
+ * C : changed bit
+ */
+
+/* Hardware bits in the page table entry */
+#define _PAGE_PROTECT 0x200 /* HW read-only bit */
+#define _PAGE_INVALID 0x400 /* HW invalid bit */
+#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
+
+/* Software bits in the page table entry */
+#define _PAGE_PRESENT 0x001 /* SW pte present bit */
+#define _PAGE_YOUNG 0x004 /* SW pte young bit */
+#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
+#define _PAGE_READ 0x010 /* SW pte read bit */
+#define _PAGE_WRITE 0x020 /* SW pte write bit */
+#define _PAGE_SPECIAL 0x040 /* SW associated with special page */
+#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
+#define __HAVE_ARCH_PTE_SPECIAL
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
+ _PAGE_YOUNG)
+
+/*
+ * handle_pte_fault uses pte_present and pte_none to find out the pte type
+ * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
+ * distinguish present from not-present ptes. It is changed only with the page
+ * table lock held.
+ *
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte
+ * (. unassigned bit, x don't care, t swap type):
+ *
+ * 842100000000
+ * 000084210000
+ * 000000008421
+ * .IR.uswrdy.p
+ * empty .10.00000000
+ * swap .11..ttttt.0
+ * prot-none, clean, old .11.xx0000.1
+ * prot-none, clean, young .11.xx0001.1
+ * prot-none, dirty, old .10.xx0010.1
+ * prot-none, dirty, young .10.xx0011.1
+ * read-only, clean, old .11.xx0100.1
+ * read-only, clean, young .01.xx0101.1
+ * read-only, dirty, old .11.xx0110.1
+ * read-only, dirty, young .01.xx0111.1
+ * read-write, clean, old .11.xx1100.1
+ * read-write, clean, young .01.xx1101.1
+ * read-write, dirty, old .10.xx1110.1
+ * read-write, dirty, young .00.xx1111.1
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ * u unused, l large
+ *
+ * pte_none is true for the bit pattern .10.00000000, pte == 0x400
+ * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
+ * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
+ */
+
+/* Bits in the segment/region table address-space-control-element */
+#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
+#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
+#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
+#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
+#define _ASCE_REAL_SPACE 0x20 /* real space control */
+#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
+#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
+#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
+#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
+#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
+#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
+
+/* Bits in the region table entry */
+#define _REGION_ENTRY_ORIGIN ~0xfffUL /* region/segment table origin */
+#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
+#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
+#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
+#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
+#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
+#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
+
+#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
+#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
+#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
+#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
+#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
+#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
+
+#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
+#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
+
+/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
+#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
+#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
+
+#define _SEGMENT_ENTRY (0)
+#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
+
+#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
+#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
+#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
+#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
+#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
+#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
+
+/*
+ * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
+ * dy..R...I...wr
+ * prot-none, clean, old 00..1...1...00
+ * prot-none, clean, young 01..1...1...00
+ * prot-none, dirty, old 10..1...1...00
+ * prot-none, dirty, young 11..1...1...00
+ * read-only, clean, old 00..1...1...01
+ * read-only, clean, young 01..1...0...01
+ * read-only, dirty, old 10..1...1...01
+ * read-only, dirty, young 11..1...0...01
+ * read-write, clean, old 00..1...1...11
+ * read-write, clean, young 01..1...0...11
+ * read-write, dirty, old 10..0...1...11
+ * read-write, dirty, young 11..0...0...11
+ * The segment table origin is used to distinguish empty (origin==0) from
+ * read-write, old segment table entries (origin!=0).
+ * HW-bits: R read-only, I invalid
+ * SW-bits: y young, d dirty, r read, w write
+ */
+
+#define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */
+
+/* Page status table bits for virtualization */
+#define PGSTE_ACC_BITS 0xf000000000000000UL
+#define PGSTE_FP_BIT 0x0800000000000000UL
+#define PGSTE_PCL_BIT 0x0080000000000000UL
+#define PGSTE_HR_BIT 0x0040000000000000UL
+#define PGSTE_HC_BIT 0x0020000000000000UL
+#define PGSTE_GR_BIT 0x0004000000000000UL
+#define PGSTE_GC_BIT 0x0002000000000000UL
+#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
+#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
+
+/* Guest Page State used for virtualization */
+#define _PGSTE_GPS_ZERO 0x0000000080000000UL
+#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
+#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
+#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
+
+/*
+ * A user page table pointer has the space-switch-event bit, the
+ * private-space-control bit and the storage-alteration-event-control
+ * bit set. A kernel page table pointer doesn't need them.
+ */
+#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
+ _ASCE_ALT_EVENT)
+
+/*
+ * Page protection definitions.
+ */
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_INVALID | _PAGE_PROTECT)
+#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_INVALID | _PAGE_PROTECT)
+
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
+ _PAGE_PROTECT)
+
+/*
+ * On s390 the page table entry has an invalid bit and a read-only bit.
+ * Read permission implies execute permission and write permission
+ * implies read permission.
+ */
+ /*xwr*/
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READ
+#define __P010 PAGE_READ
+#define __P011 PAGE_READ
+#define __P100 PAGE_READ
+#define __P101 PAGE_READ
+#define __P110 PAGE_READ
+#define __P111 PAGE_READ
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READ
+#define __S010 PAGE_WRITE
+#define __S011 PAGE_WRITE
+#define __S100 PAGE_READ
+#define __S101 PAGE_READ
+#define __S110 PAGE_WRITE
+#define __S111 PAGE_WRITE
+
+/*
+ * Segment entry (large page) protection definitions.
+ */
+#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
+ _SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT | \
+ _SEGMENT_ENTRY_READ)
+#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_WRITE)
+
+static inline int mm_has_pgste(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (unlikely(mm->context.has_pgste))
+ return 1;
+#endif
+ return 0;
+}
+
+static inline int mm_alloc_pgste(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (unlikely(mm->context.alloc_pgste))
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * In the case that a guest uses storage keys
+ * faults should no longer be backed by zero pages
+ */
+#define mm_forbids_zeropage mm_use_skey
+static inline int mm_use_skey(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (mm->context.use_skey)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * pgd/pmd/pte query functions
+ */
+static inline int pgd_present(pgd_t pgd)
+{
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+ return 1;
+ return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+ return 0;
+ return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+ /*
+ * With dynamic page table levels the pgd can be a region table
+ * entry or a segment table entry. Check for the bits that are
+ * invalid for either table entry.
+ */
+ unsigned long mask =
+ ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
+ ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
+ return (pgd_val(pgd) & mask) != 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+ return 1;
+ return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
+}
+
+static inline int pud_none(pud_t pud)
+{
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+ return 0;
+ return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
+}
+
+static inline int pud_large(pud_t pud)
+{
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
+ return 0;
+ return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ /*
+ * With dynamic page table levels the pud can be a region table
+ * entry or a segment table entry. Check for the bits that are
+ * invalid for either table entry.
+ */
+ unsigned long mask =
+ ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
+ ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
+ return (pud_val(pud) & mask) != 0;
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ unsigned long origin_mask;
+
+ origin_mask = _SEGMENT_ENTRY_ORIGIN;
+ if (pmd_large(pmd))
+ origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
+ return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ if (pmd_large(pmd))
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
+}
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ int dirty = 1;
+ if (pmd_large(pmd))
+ dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
+ return dirty;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ int young = 1;
+ if (pmd_large(pmd))
+ young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
+ return young;
+}
+
+static inline int pte_present(pte_t pte)
+{
+ /* Bit pattern: (pte & 0x001) == 0x001 */
+ return (pte_val(pte) & _PAGE_PRESENT) != 0;
+}
+
+static inline int pte_none(pte_t pte)
+{
+ /* Bit pattern: pte == 0x400 */
+ return pte_val(pte) == _PAGE_INVALID;
+}
+
+static inline int pte_swap(pte_t pte)
+{
+ /* Bit pattern: (pte & 0x201) == 0x200 */
+ return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
+ == _PAGE_PROTECT;
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_SPECIAL);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+ return pte_val(a) == pte_val(b);
+}
+
+static inline pgste_t pgste_get_lock(pte_t *ptep)
+{
+ unsigned long new = 0;
+#ifdef CONFIG_PGSTE
+ unsigned long old;
+
+ preempt_disable();
+ asm(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " nihh %0,0xff7f\n" /* clear PCL bit in old */
+ " oihh %1,0x0080\n" /* set PCL bit in new */
+ " csg %0,%1,%2\n"
+ " jl 0b\n"
+ : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
+ : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
+#endif
+ return __pgste(new);
+}
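+
+/*
+ * Illustrative sketch (editor's addition, not part of the original
+ * header): the inline assembly above implements a spinlock on the PCL
+ * bit (bit 1UL << 55, set via "oihh %1,0x0080") of the pgste word that
+ * follows the pte table. In C, using a hypothetical 64-bit cmpxchg():
+ *
+ *	unsigned long *word = (unsigned long *)(ptep + PTRS_PER_PTE);
+ *	unsigned long cur = *word, old, new;
+ *	do {
+ *		old = cur & ~(1UL << 55);	// expect lock bit clear
+ *		new = cur | (1UL << 55);	// set lock bit
+ *		cur = cmpxchg(word, old, new);	// returns observed value
+ *	} while (cur != old);
+ */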
+
+static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+ asm(
+ " nihh %1,0xff7f\n" /* clear PCL bit */
+ " stg %1,%0\n"
+ : "=Q" (ptep[PTRS_PER_PTE])
+ : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
+ : "cc", "memory");
+ preempt_enable();
+#endif
+}
+
+static inline pgste_t pgste_get(pte_t *ptep)
+{
+ unsigned long pgste = 0;
+#ifdef CONFIG_PGSTE
+ pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
+#endif
+ return __pgste(pgste);
+}
+
+static inline void pgste_set(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+ *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
+#endif
+}
+
+static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long address, bits, skey;
+
+ if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
+ return pgste;
+ address = pte_val(*ptep) & PAGE_MASK;
+ skey = (unsigned long) page_get_storage_key(address);
+ bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+ /* Transfer page changed & referenced bit to guest bits in pgste */
+ pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
+ /* Copy page access key and fetch protection bit to pgste */
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+ pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+#endif
+ return pgste;
+}
+
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long address;
+ unsigned long nkey;
+
+ if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
+ return;
+ VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
+ address = pte_val(entry) & PAGE_MASK;
+ /*
+ * Set page access key and fetch protection bit from pgste.
+ * The guest C/R information is still in the PGSTE, set real
+ * key C/R to 0.
+ */
+ nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+ nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
+ page_set_storage_key(address, nkey, 0);
+#endif
+}
+
+static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
+{
+ if ((pte_val(entry) & _PAGE_PRESENT) &&
+ (pte_val(entry) & _PAGE_WRITE) &&
+ !(pte_val(entry) & _PAGE_INVALID)) {
+ if (!MACHINE_HAS_ESOP) {
+ /*
+ * Without enhanced suppression-on-protection force
+ * the dirty bit on for all writable ptes.
+ */
+ pte_val(entry) |= _PAGE_DIRTY;
+ pte_val(entry) &= ~_PAGE_PROTECT;
+ }
+ if (!(pte_val(entry) & _PAGE_PROTECT))
+ /* This pte allows write access, set user-dirty */
+ pgste_val(pgste) |= PGSTE_UC_BIT;
+ }
+ *ptep = entry;
+ return pgste;
+}
+
+/**
+ * struct gmap_struct - guest address space
+ * @crst_list: list of all crst tables used in the guest address space
+ * @mm: pointer to the parent mm_struct
+ * @guest_to_host: radix tree with guest to host address translation
+ * @host_to_guest: radix tree with pointer to segment table entries
+ * @guest_table_lock: spinlock to protect all entries in the guest page table
+ * @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
+ * @pfault_enabled: defines if pfaults are applicable for the guest
+ */
+struct gmap {
+ struct list_head list;
+ struct list_head crst_list;
+ struct mm_struct *mm;
+ struct radix_tree_root guest_to_host;
+ struct radix_tree_root host_to_guest;
+ spinlock_t guest_table_lock;
+ unsigned long *table;
+ unsigned long asce;
+ unsigned long asce_end;
+ void *private;
+ bool pfault_enabled;
+};
+
+/**
+ * struct gmap_notifier - notify function block for page invalidation
+ * @notifier_call: address of callback function
+ */
+struct gmap_notifier {
+ struct list_head list;
+ void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
+};
+
+struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
+void gmap_free(struct gmap *gmap);
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+ unsigned long to, unsigned long len);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
+unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
+int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
+int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
+void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
+void __gmap_zap(struct gmap *, unsigned long gaddr);
+bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
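+
+/*
+ * Illustrative sketch (editor's addition): a plausible gmap lifecycle
+ * as suggested by the declarations above; error handling is elided and
+ * the limit and address values are hypothetical.
+ *
+ *	struct gmap *gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
+ *	if (gmap) {
+ *		gmap_enable(gmap);
+ *		gmap_map_segment(gmap, from, to, len);
+ *		...
+ *		gmap_unmap_segment(gmap, to, len);
+ *		gmap_disable(gmap);
+ *		gmap_free(gmap);
+ *	}
+ */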
+
+void gmap_register_ipte_notifier(struct gmap_notifier *);
+void gmap_unregister_ipte_notifier(struct gmap_notifier *);
+int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
+void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
+
+static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+ if (pgste_val(pgste) & PGSTE_IN_BIT) {
+ pgste_val(pgste) &= ~PGSTE_IN_BIT;
+ gmap_do_ipte_notify(mm, addr, ptep);
+ }
+#endif
+ return pgste;
+}
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry)
+{
+ pgste_t pgste;
+
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
+ pgste_set_key(ptep, pgste, entry, mm);
+ pgste = pgste_set_pte(ptep, pgste, entry);
+ pgste_set_unlock(ptep, pgste);
+ } else {
+ *ptep = entry;
+ }
+}
+
+/*
+ * The query functions pte_write/pte_dirty/pte_young only work if
+ * pte_present() is true. Undefined behaviour if not.
+ */
+static inline int pte_write(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_WRITE) != 0;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_DIRTY) != 0;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_YOUNG) != 0;
+}
+
+#define __HAVE_ARCH_PTE_UNUSED
+static inline int pte_unused(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_UNUSED;
+}
+
+/*
+ * pgd/pmd/pte modification functions
+ */
+
+static inline void pgd_clear(pgd_t *pgd)
+{
+ if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+ pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
+}
+
+static inline void pud_clear(pud_t *pud)
+{
+ if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ pud_val(*pud) = _REGION3_ENTRY_EMPTY;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_val(*ptep) = _PAGE_INVALID;
+}
+
+/*
+ * The following pte modification functions only work if
+ * pte_present() is true. Undefined behaviour if not.
+ */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) &= _PAGE_CHG_MASK;
+ pte_val(pte) |= pgprot_val(newprot);
+ /*
+ * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
+ * invalid bit set, clear it again for readable, young pages
+ */
+ if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
+ pte_val(pte) &= ~_PAGE_INVALID;
+ /*
+ * newprot for PAGE_READ and PAGE_WRITE has the page protection
+ * bit set, clear it again for writable, dirty pages
+ */
+ if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
+ pte_val(pte) &= ~_PAGE_PROTECT;
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_WRITE;
+ pte_val(pte) |= _PAGE_PROTECT;
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_DIRTY)
+ pte_val(pte) &= ~_PAGE_PROTECT;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_DIRTY;
+ pte_val(pte) |= _PAGE_PROTECT;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_DIRTY;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) &= ~_PAGE_PROTECT;
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_YOUNG;
+ pte_val(pte) |= _PAGE_INVALID;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_YOUNG;
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) &= ~_PAGE_INVALID;
+ return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SPECIAL;
+ return pte;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_LARGE;
+ return pte;
+}
+#endif
+
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
+{
+ unsigned long pto = (unsigned long) ptep;
+
+ /* Invalidation + global TLB flush for the pte */
+ asm volatile(
+ " ipte %2,%3"
+ : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
+static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+{
+ unsigned long pto = (unsigned long) ptep;
+
+ /* Invalidation + local TLB flush for the pte */
+ asm volatile(
+ " .insn rrf,0xb2210000,%2,%3,0,1"
+ : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
+static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
+{
+ unsigned long pto = (unsigned long) ptep;
+
+ /* Invalidate a range of ptes + global TLB flush of the ptes */
+ do {
+ asm volatile(
+ " .insn rrf,0xb2210000,%2,%0,%1,0"
+ : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
+ } while (nr != 255);
+}
+
+static inline void ptep_flush_direct(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ int active, count;
+
+ if (pte_val(*ptep) & _PAGE_INVALID)
+ return;
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+ __ptep_ipte_local(address, ptep);
+ else
+ __ptep_ipte(address, ptep);
+ atomic_sub(0x10000, &mm->context.attach_count);
+}
+
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ int active, count;
+
+ if (pte_val(*ptep) & _PAGE_INVALID)
+ return;
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if ((count & 0xffff) <= active) {
+ pte_val(*ptep) |= _PAGE_INVALID;
+ mm->context.flush_mm = 1;
+ } else
+ __ptep_ipte(address, ptep);
+ atomic_sub(0x10000, &mm->context.attach_count);
+}
+
+/*
+ * Get (and clear) the user dirty bit for a pte.
+ */
+static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ pgste_t pgste;
+ pte_t pte;
+ int dirty;
+
+ if (!mm_has_pgste(mm))
+ return 0;
+ pgste = pgste_get_lock(ptep);
+ dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
+ pgste_val(pgste) &= ~PGSTE_UC_BIT;
+ pte = *ptep;
+ if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
+ pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
+ __ptep_ipte(addr, ptep);
+ if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
+ pte_val(pte) |= _PAGE_PROTECT;
+ else
+ pte_val(pte) |= _PAGE_INVALID;
+ *ptep = pte;
+ }
+ pgste_set_unlock(ptep, pgste);
+ return dirty;
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ pgste_t pgste;
+ pte_t pte, oldpte;
+ int young;
+
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
+ }
+
+ oldpte = pte = *ptep;
+ ptep_flush_direct(vma->vm_mm, addr, ptep);
+ young = pte_young(pte);
+ pte = pte_mkold(pte);
+
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
+ pgste = pgste_set_pte(ptep, pgste, pte);
+ pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = pte;
+
+ return young;
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ return ptep_test_and_clear_young(vma, address, ptep);
+}
+
+/*
+ * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
+ * both clear the TLB for the unmapped pte. The reason is that
+ * ptep_get_and_clear is used in common code (e.g. change_pte_range)
+ * to modify an active pte. The sequence is
+ * 1) ptep_get_and_clear
+ * 2) set_pte_at
+ * 3) flush_tlb_range
+ * On s390 the tlb needs to get flushed with the modification of the pte
+ * if the pte is active. The only way how this can be implemented is to
+ * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
+ * is a nop.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ pgste_t pgste;
+ pte_t pte;
+
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+ }
+
+ pte = *ptep;
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) = _PAGE_INVALID;
+
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_update_all(&pte, pgste, mm);
+ pgste_set_unlock(ptep, pgste);
+ }
+ return pte;
+}
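+
+/*
+ * Illustrative sketch (editor's addition): the common-code sequence
+ * described above, roughly as change_pte_range() uses it; the local
+ * names are hypothetical.
+ *
+ *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB
+ *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
+ *	flush_tlb_range(vma, start, end);		// a nop on s390
+ */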
+
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pgste_t pgste;
+ pte_t pte;
+
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste_ipte_notify(mm, address, ptep, pgste);
+ }
+
+ pte = *ptep;
+ ptep_flush_lazy(mm, address, ptep);
+
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_update_all(&pte, pgste, mm);
+ pgste_set(ptep, pgste);
+ }
+ return pte;
+}
+
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep, pte_t pte)
+{
+ pgste_t pgste;
+
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_get(ptep);
+ pgste_set_key(ptep, pgste, pte, mm);
+ pgste = pgste_set_pte(ptep, pgste, pte);
+ pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = pte;
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ pgste_t pgste;
+ pte_t pte;
+
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+ }
+
+ pte = *ptep;
+ ptep_flush_direct(vma->vm_mm, address, ptep);
+ pte_val(*ptep) = _PAGE_INVALID;
+
+ if (mm_has_pgste(vma->vm_mm)) {
+ if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
+ _PGSTE_GPS_USAGE_UNUSED)
+ pte_val(pte) |= _PAGE_UNUSED;
+ pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
+ pgste_set_unlock(ptep, pgste);
+ }
+ return pte;
+}
+
+/*
+ * The batched pte unmap code uses ptep_get_and_clear_full to clear the
+ * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
+ * tlbs of an mm if it can guarantee that the ptes of the mm_struct
+ * cannot be accessed while the batched unmap is running. In this case
+ * full==1 and a simple pte_clear is enough. See tlb.h.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep, int full)
+{
+ pgste_t pgste;
+ pte_t pte;
+
+ if (!full && mm_has_pgste(mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+ }
+
+ pte = *ptep;
+ if (!full)
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) = _PAGE_INVALID;
+
+ if (!full && mm_has_pgste(mm)) {
+ pgste = pgste_update_all(&pte, pgste, mm);
+ pgste_set_unlock(ptep, pgste);
+ }
+ return pte;
+}
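+
+/*
+ * Illustrative sketch (editor's addition): inside a batched unmap the
+ * caller passes the mmu_gather's fullmm flag, so with full == 1 the
+ * call degenerates to a simple clear without a per-pte flush:
+ *
+ *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
+ */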
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ pgste_t pgste;
+ pte_t pte = *ptep;
+
+ if (pte_write(pte)) {
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+ }
+
+ ptep_flush_lazy(mm, address, ptep);
+ pte = pte_wrprotect(pte);
+
+ if (mm_has_pgste(mm)) {
+ pgste = pgste_set_pte(ptep, pgste, pte);
+ pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = pte;
+ }
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ pgste_t pgste;
+
+ if (pte_same(*ptep, entry))
+ return 0;
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste = pgste_get_lock(ptep);
+ pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+ }
+
+ ptep_flush_direct(vma->vm_mm, address, ptep);
+
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste_set_key(ptep, pgste, entry, vma->vm_mm);
+ pgste = pgste_set_pte(ptep, pgste, entry);
+ pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = entry;
+ return 1;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+{
+ pte_t __pte;
+ pte_val(__pte) = physpage + pgprot_val(pgprot);
+ return pte_mkyoung(__pte);
+}
+
+static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+ unsigned long physpage = page_to_phys(page);
+ pte_t __pte = mk_pte_phys(physpage, pgprot);
+
+ if (pte_write(__pte) && PageDirty(page))
+ __pte = pte_mkdirty(__pte);
+ return __pte;
+}
+
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
+
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
+#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
+#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ pud_t *pud = (pud_t *) pgd;
+ if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+ pud = (pud_t *) pgd_deref(*pgd);
+ return pud + pud_index(address);
+}
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ pmd_t *pmd = (pmd_t *) pud;
+ if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ pmd = (pmd_t *) pud_deref(*pud);
+ return pmd + pmd_index(address);
+}
+
+#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
+#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
+
+/* Find an entry in the lowest level page table.. */
+#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
+#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
+#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+#define pte_unmap(pte) do { } while (0)
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
+{
+ /*
+ * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
+ * Convert to segment table entry format.
+ */
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+ return pgprot_val(SEGMENT_NONE);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+ return pgprot_val(SEGMENT_READ);
+ return pgprot_val(SEGMENT_WRITE);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
+ if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+ return pmd;
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+ return pmd;
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ if (pmd_large(pmd)) {
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ }
+ return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ if (pmd_large(pmd)) {
+ pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+ }
+ return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ if (pmd_large(pmd)) {
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
+ }
+ return pmd;
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ if (pmd_large(pmd)) {
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+ }
+ return pmd;
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ if (pmd_large(pmd)) {
+ pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
+ _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
+ _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
+ pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+ return pmd;
+ }
+ pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+ return pmd;
+}
+
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
+{
+ pmd_t __pmd;
+ pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+ return __pmd;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+
+static inline void __pmdp_csp(pmd_t *pmdp)
+{
+ register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+ register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+ _SEGMENT_ENTRY_INVALID;
+ register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+ asm volatile(
+ " csp %1,%3"
+ : "=m" (*pmdp)
+ : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
+{
+ unsigned long sto;
+
+ sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,0"
+ : "=m" (*pmdp)
+ : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+ : "cc" );
+}
+
+static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+{
+ unsigned long sto;
+
+ sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,1"
+ : "=m" (*pmdp)
+ : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+ : "cc" );
+}
+
+static inline void pmdp_flush_direct(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ int active, count;
+
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return;
+ if (!MACHINE_HAS_IDTE) {
+ __pmdp_csp(pmdp);
+ return;
+ }
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+ __pmdp_idte_local(address, pmdp);
+ else
+ __pmdp_idte(address, pmdp);
+ atomic_sub(0x10000, &mm->context.attach_count);
+}
+
+static inline void pmdp_flush_lazy(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ int active, count;
+
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return;
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if ((count & 0xffff) <= active) {
+ pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
+ mm->context.flush_mm = 1;
+ } else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(address, pmdp);
+ else
+ __pmdp_csp(pmdp);
+ atomic_sub(0x10000, &mm->context.attach_count);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
+ (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t entry)
+{
+ *pmdp = entry;
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd;
+
+ pmd = *pmdp;
+ pmdp_flush_direct(vma->vm_mm, address, pmdp);
+ *pmdp = pmd_mkold(pmd);
+ return pmd_young(pmd);
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmdp_flush_direct(mm, address, pmdp);
+ pmd_clear(pmdp);
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp, int full)
+{
+ pmd_t pmd = *pmdp;
+
+ if (!full)
+ pmdp_flush_lazy(mm, address, pmdp);
+ pmd_clear(pmdp);
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
+static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+static inline void pmdp_invalidate(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmdp_flush_direct(vma->vm_mm, address, pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ if (pmd_write(pmd)) {
+ pmdp_flush_direct(mm, address, pmdp);
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
+ }
+}
+
+#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
+#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+ return MACHINE_HAS_HPAGE ? 1 : 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * 64 bit swap entry format:
+ * A page-table entry has some bits we have to treat in a special way.
+ * Bits 52 and 55 have to be zero, otherwise a specification
+ * exception will occur instead of a page translation exception. The
+ * specification exception has the bad habit of not storing the
+ * necessary information in the lowcore.
+ * Bits 54 and 63 are used to indicate the page type.
+ * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
+ * This leaves the bits 0-51 and bits 56-62 to store type and offset.
+ * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
+ * for the offset.
+ * |			  offset			|01100|type |00|
+ * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
+ * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
+ */
+
+#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
+#define __SWP_OFFSET_SHIFT 12
+#define __SWP_TYPE_MASK ((1UL << 5) - 1)
+#define __SWP_TYPE_SHIFT 2
+
+static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
+{
+ pte_t pte;
+
+ pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
+ pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
+ pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
+ return pte;
+}
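+
+/*
+ * Worked example (editor's addition): mk_swap_pte(5, 0x1234) yields
+ *	_PAGE_INVALID | _PAGE_PROTECT	= 0x0000600
+ *	0x1234 << __SWP_OFFSET_SHIFT	= 0x1234000
+ *	5 << __SWP_TYPE_SHIFT		= 0x0000014
+ *	pte_val				= 0x1234614
+ * which satisfies the swap pattern (pte & 0x201) == 0x200.
+ */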
+
+static inline unsigned long __swp_type(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
+}
+
+static inline unsigned long __swp_offset(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
+}
+
+static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
+{
+ return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
+}
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#endif /* !__ASSEMBLY__ */
+
+#define kern_addr_valid(addr) (1)
+
+extern int vmem_add_mapping(unsigned long start, unsigned long size);
+extern int vmem_remove_mapping(unsigned long start, unsigned long size);
+extern int s390_enable_sie(void);
+extern int s390_enable_skey(void);
+extern void s390_reset_cmma(struct mm_struct *mm);
+
+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+/*
+ * No page table caches to initialise
+ */
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
+
+#include <asm-generic/pgtable.h>
+
+#endif /* _S390_PAGE_H */
diff --git a/kernel/arch/s390/include/asm/processor.h b/kernel/arch/s390/include/asm/processor.h
new file mode 100644
index 000000000..dedb62185
--- /dev/null
+++ b/kernel/arch/s390/include/asm/processor.h
@@ -0,0 +1,360 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Hartmut Penner (hp@de.ibm.com),
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/processor.h"
+ * Copyright (C) 1994, Linus Torvalds
+ */
+
+#ifndef __ASM_S390_PROCESSOR_H
+#define __ASM_S390_PROCESSOR_H
+
+#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
+#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
+
+#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
+#define _CIF_ASCE (1<<CIF_ASCE)
+#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+#include <asm/cpu.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/runtime_instr.h>
+
+static inline void set_cpu_flag(int flag)
+{
+ S390_lowcore.cpu_flags |= (1U << flag);
+}
+
+static inline void clear_cpu_flag(int flag)
+{
+ S390_lowcore.cpu_flags &= ~(1U << flag);
+}
+
+static inline int test_cpu_flag(int flag)
+{
+ return !!(S390_lowcore.cpu_flags & (1U << flag));
+}
+
+#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
+
+static inline void get_cpu_id(struct cpuid *ptr)
+{
+ asm volatile("stidp %0" : "=Q" (*ptr));
+}
+
+extern void s390_adjust_jiffies(void);
+extern const struct seq_operations cpuinfo_op;
+extern int sysctl_ieee_emulation_warnings;
+extern void execve_tail(void);
+
+/*
+ * User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit.
+ */
+
+#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
+#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
+ (1UL << 30) : (1UL << 41))
+#define TASK_SIZE TASK_SIZE_OF(current)
+#define TASK_MAX_SIZE (1UL << 53)
+
+#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
+#define STACK_TOP_MAX (1UL << 42)
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
+typedef struct {
+ __u32 ar4;
+} mm_segment_t;
+
+/*
+ * Thread structure
+ */
+struct thread_struct {
+ s390_fp_regs fp_regs;
+ unsigned int acrs[NUM_ACRS];
+ unsigned long ksp; /* kernel stack pointer */
+ mm_segment_t mm_segment;
+ unsigned long gmap_addr; /* address of last gmap fault. */
+ unsigned int gmap_pfault; /* signal of a pending guest pfault */
+ struct per_regs per_user; /* User specified PER registers */
+ struct per_event per_event; /* Cause of the last PER trap */
+ unsigned long per_flags; /* Flags to control debug behavior */
+ /* pfault_wait is used to block the process on a pfault event */
+ unsigned long pfault_wait;
+ struct list_head list;
+ /* cpu runtime instrumentation */
+ struct runtime_instr_cb *ri_cb;
+ int ri_signum;
+ unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
+ __vector128 *vxrs; /* Vector register save area */
+};
+
+/* Flag to disable transactions. */
+#define PER_FLAG_NO_TE 1UL
+/* Flag to enable random transaction aborts. */
+#define PER_FLAG_TE_ABORT_RAND 2UL
+/* Flag to specify random transaction abort mode:
+ * - abort each transaction at a random instruction before TEND if set.
+ * - abort random transactions at a random instruction if cleared.
+ */
+#define PER_FLAG_TE_ABORT_RAND_TEND 4UL
+
+typedef struct thread_struct thread_struct;
+
+/*
+ * Stack layout of a C stack frame.
+ */
+#ifndef __PACK_STACK
+struct stack_frame {
+ unsigned long back_chain;
+ unsigned long empty1[5];
+ unsigned long gprs[10];
+ unsigned int empty2[8];
+};
+#else
+struct stack_frame {
+ unsigned long empty1[5];
+ unsigned int empty2[8];
+ unsigned long gprs[10];
+ unsigned long back_chain;
+};
+#endif
+
+#define ARCH_MIN_TASKALIGN 8
+
+#define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
+}
+
+/*
+ * Do necessary setup to start up a new thread.
+ */
+#define start_thread(regs, new_psw, new_stackp) do { \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
+ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+ regs->gprs[15] = new_stackp; \
+ execve_tail(); \
+} while (0)
+
+#define start_thread31(regs, new_psw, new_stackp) do { \
+ regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
+ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+ regs->gprs[15] = new_stackp; \
+ crst_table_downgrade(current->mm, 1UL << 31); \
+ execve_tail(); \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+struct seq_file;
+
+void show_cacheinfo(struct seq_file *m);
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+extern unsigned long thread_saved_pc(struct task_struct *t);
+
+unsigned long get_wchan(struct task_struct *p);
+#define task_pt_regs(tsk) ((struct pt_regs *) \
+ (task_stack_page(tsk) + THREAD_SIZE) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
+
+/* Has task runtime instrumentation enabled ? */
+#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
+
+static inline unsigned short stap(void)
+{
+ unsigned short cpu_address;
+
+ asm volatile("stap %0" : "=m" (cpu_address));
+ return cpu_address;
+}
+
+/*
+ * Give up the time slice of the virtual CPU.
+ */
+void cpu_relax(void);
+
+#define cpu_relax_lowlatency() barrier()
+
+static inline void psw_set_key(unsigned int key)
+{
+ asm volatile("spka 0(%0)" : : "d" (key));
+}
+
+/*
+ * Set PSW to specified value.
+ */
+static inline void __load_psw(psw_t psw)
+{
+ asm volatile("lpswe %0" : : "Q" (psw) : "cc");
+}
+
+/*
+ * Set PSW mask to specified value, while leaving the
+ * PSW addr pointing to the next instruction.
+ */
+static inline void __load_psw_mask(unsigned long mask)
+{
+ unsigned long addr;
+ psw_t psw;
+
+ psw.mask = mask;
+
+ asm volatile(
+ " larl %0,1f\n"
+ " stg %0,%O1+8(%R1)\n"
+ " lpswe %1\n"
+ "1:"
+ : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
+}
+
+/*
+ * Rewind PSW instruction address by specified number of bytes.
+ */
+static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
+{
+ unsigned long mask;
+
+ mask = (psw.mask & PSW_MASK_EA) ? -1UL :
+ (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
+ (1UL << 24) - 1;
+ return (psw.addr - ilc) & mask;
+}
+
+/*
+ * Function to stop a processor until the next interrupt occurs
+ */
+void enabled_wait(void);
+
+/*
+ * Function to drop a processor into disabled wait state
+ */
+static inline void __noreturn disabled_wait(unsigned long code)
+{
+ unsigned long ctl_buf;
+ psw_t dw_psw;
+
+ dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
+ dw_psw.addr = code;
+	/*
+	 * Store status and then load the disabled wait PSW;
+	 * the processor is dead afterwards.
+	 */
+ asm volatile(
+ " stctg 0,0,0(%2)\n"
+ " ni 4(%2),0xef\n" /* switch off protection */
+ " lctlg 0,0,0(%2)\n"
+ " lghi 1,0x1000\n"
+ " stpt 0x328(1)\n" /* store timer */
+ " stckc 0x330(1)\n" /* store clock comparator */
+ " stpx 0x318(1)\n" /* store prefix register */
+ " stam 0,15,0x340(1)\n"/* store access registers */
+ " stfpc 0x31c(1)\n" /* store fpu control */
+ " std 0,0x200(1)\n" /* store f0 */
+ " std 1,0x208(1)\n" /* store f1 */
+ " std 2,0x210(1)\n" /* store f2 */
+ " std 3,0x218(1)\n" /* store f3 */
+ " std 4,0x220(1)\n" /* store f4 */
+ " std 5,0x228(1)\n" /* store f5 */
+ " std 6,0x230(1)\n" /* store f6 */
+ " std 7,0x238(1)\n" /* store f7 */
+ " std 8,0x240(1)\n" /* store f8 */
+ " std 9,0x248(1)\n" /* store f9 */
+ " std 10,0x250(1)\n" /* store f10 */
+ " std 11,0x258(1)\n" /* store f11 */
+ " std 12,0x260(1)\n" /* store f12 */
+ " std 13,0x268(1)\n" /* store f13 */
+ " std 14,0x270(1)\n" /* store f14 */
+ " std 15,0x278(1)\n" /* store f15 */
+ " stmg 0,15,0x280(1)\n"/* store general registers */
+ " stctg 0,15,0x380(1)\n"/* store control registers */
+ " oi 0x384(1),0x10\n"/* fake protection bit */
+ " lpswe 0(%1)"
+ : "=m" (ctl_buf)
+ : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
+ while (1);
+}
+
+/*
+ * Used to set the PSW mask, except for the first byte,
+ * which won't be changed by this function.
+ */
+static inline void __set_psw_mask(unsigned long mask)
+{
+ __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
+}
+
+#define local_mcck_enable() \
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
+#define local_mcck_disable() \
+ __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
+
+/*
+ * Basic Machine Check/Program Check Handler.
+ */
+
+extern void s390_base_mcck_handler(void);
+extern void s390_base_pgm_handler(void);
+extern void s390_base_ext_handler(void);
+
+extern void (*s390_base_mcck_handler_fn)(void);
+extern void (*s390_base_pgm_handler_fn)(void);
+extern void (*s390_base_ext_handler_fn)(void);
+
+#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
+
+extern int memcpy_real(void *, void *, size_t);
+extern void memcpy_absolute(void *, void *, size_t);
+
+#define mem_assign_absolute(dest, val) { \
+ __typeof__(dest) __tmp = (val); \
+ \
+ BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \
+ memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
+}
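+
+/*
+ * Illustrative sketch (editor's addition): mem_assign_absolute() is
+ * meant for patching data through its absolute (prefix-less) address,
+ * e.g. a lowcore field; the field name below is hypothetical.
+ *
+ *	mem_assign_absolute(S390_lowcore.restart_psw, psw);
+ */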
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target) \
+ ".section __ex_table,\"a\"\n" \
+ ".align 4\n" \
+ ".long (" #_fault ") - .\n" \
+ ".long (" #_target ") - .\n" \
+ ".previous\n"
+
+#else /* __ASSEMBLY__ */
+
+#define EX_TABLE(_fault, _target) \
+ .section __ex_table,"a" ; \
+ .align 4 ; \
+ .long (_fault) - . ; \
+ .long (_target) - . ; \
+ .previous
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/kernel/arch/s390/include/asm/ptrace.h b/kernel/arch/s390/include/asm/ptrace.h
new file mode 100644
index 000000000..6feda2599
--- /dev/null
+++ b/kernel/arch/s390/include/asm/ptrace.h
@@ -0,0 +1,177 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+#ifndef _S390_PTRACE_H
+#define _S390_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#define PIF_SYSCALL 0 /* inside a system call */
+#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
+
+#define _PIF_SYSCALL (1<<PIF_SYSCALL)
+#define _PIF_PER_TRAP (1<<PIF_PER_TRAP)
+
+#ifndef __ASSEMBLY__
+
+#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
+ PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+ PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
+ PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
+
+struct psw_bits {
+ unsigned long long : 1;
+ unsigned long long r : 1; /* PER-Mask */
+ unsigned long long : 3;
+ unsigned long long t : 1; /* DAT Mode */
+ unsigned long long i : 1; /* Input/Output Mask */
+ unsigned long long e : 1; /* External Mask */
+ unsigned long long key : 4; /* PSW Key */
+ unsigned long long : 1;
+ unsigned long long m : 1; /* Machine-Check Mask */
+ unsigned long long w : 1; /* Wait State */
+ unsigned long long p : 1; /* Problem State */
+ unsigned long long as : 2; /* Address Space Control */
+ unsigned long long cc : 2; /* Condition Code */
+ unsigned long long pm : 4; /* Program Mask */
+ unsigned long long ri : 1; /* Runtime Instrumentation */
+ unsigned long long : 6;
+ unsigned long long eaba : 2; /* Addressing Mode */
+ unsigned long long : 31;
+	unsigned long long ia : 64;	/* Instruction Address */
+};
+
+enum {
+ PSW_AMODE_24BIT = 0,
+ PSW_AMODE_31BIT = 1,
+ PSW_AMODE_64BIT = 3
+};
+
+enum {
+ PSW_AS_PRIMARY = 0,
+ PSW_AS_ACCREG = 1,
+ PSW_AS_SECONDARY = 2,
+ PSW_AS_HOME = 3
+};
+
+#define psw_bits(__psw) (*({ \
+ typecheck(psw_t, __psw); \
+ &(*(struct psw_bits *)(&(__psw))); \
+}))
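+
+/*
+ * Example (editor's addition): the macro above gives bit-field access
+ * to any psw_t lvalue, e.g.
+ *
+ *	psw_bits(regs->psw).as = PSW_AS_HOME;
+ *	if (psw_bits(regs->psw).t)
+ *		...;	// DAT is on
+ */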
+
+/*
+ * The pt_regs struct defines the way the registers are stored on
+ * the stack during a system call.
+ */
+struct pt_regs
+{
+ unsigned long args[1];
+ psw_t psw;
+ unsigned long gprs[NUM_GPRS];
+ unsigned long orig_gpr2;
+ unsigned int int_code;
+ unsigned int int_parm;
+ unsigned long int_parm_long;
+ unsigned long flags;
+};
+
+/*
+ * Program event recording (PER) register set.
+ */
+struct per_regs {
+ unsigned long control; /* PER control bits */
+ unsigned long start; /* PER starting address */
+ unsigned long end; /* PER ending address */
+};
+
+/*
+ * PER event contains information about the cause of the last PER exception.
+ */
+struct per_event {
+ unsigned short cause; /* PER code, ATMID and AI */
+ unsigned long address; /* PER address */
+ unsigned char paid; /* PER access identification */
+};
+
+/*
+ * Simplified per_info structure used to decode the ptrace user space ABI.
+ */
+struct per_struct_kernel {
+ unsigned long cr9; /* PER control bits */
+ unsigned long cr10; /* PER starting address */
+ unsigned long cr11; /* PER ending address */
+ unsigned long bits; /* Obsolete software bits */
+ unsigned long starting_addr; /* User specified start address */
+ unsigned long ending_addr; /* User specified end address */
+ unsigned short perc_atmid; /* PER trap ATMID */
+ unsigned long address; /* PER trap instruction address */
+ unsigned char access_id; /* PER trap access identification */
+};
+
+#define PER_EVENT_MASK 0xEB000000UL
+
+#define PER_EVENT_BRANCH 0x80000000UL
+#define PER_EVENT_IFETCH 0x40000000UL
+#define PER_EVENT_STORE 0x20000000UL
+#define PER_EVENT_STORE_REAL 0x08000000UL
+#define PER_EVENT_TRANSACTION_END 0x02000000UL
+#define PER_EVENT_NULLIFICATION 0x01000000UL
+
+#define PER_CONTROL_MASK 0x00e00000UL
+
+#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL
+#define PER_CONTROL_SUSPENSION 0x00400000UL
+#define PER_CONTROL_ALTERATION 0x00200000UL
+
+static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+ regs->flags |= (1U << flag);
+}
+
+static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+ regs->flags &= ~(1U << flag);
+}
+
+static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+ return !!(regs->flags & (1U << flag));
+}
+
+/*
+ * These are defined as per linux/ptrace.h, which see.
+ */
+#define arch_has_single_step() (1)
+#define arch_has_block_step() (1)
+
+#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
+#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
+#define user_stack_pointer(regs)	((regs)->gprs[15])
+#define profile_pc(regs) instruction_pointer(regs)
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->gprs[2];
+}
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->psw.addr = val | PSW_ADDR_AMODE;
+}
+
+int regs_query_register_offset(const char *name);
+const char *regs_query_register_name(unsigned int offset);
+unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->gprs[15] & PSW_ADDR_INSN;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _S390_PTRACE_H */
diff --git a/kernel/arch/s390/include/asm/qdio.h b/kernel/arch/s390/include/asm/qdio.h
new file mode 100644
index 000000000..998b61cd0
--- /dev/null
+++ b/kernel/arch/s390/include/asm/qdio.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright IBM Corp. 2000, 2008
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ */
+#ifndef __QDIO_H__
+#define __QDIO_H__
+
+#include <linux/interrupt.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+
+/* only use 4 queues to save some cachelines */
+#define QDIO_MAX_QUEUES_PER_IRQ 4
+#define QDIO_MAX_BUFFERS_PER_Q 128
+#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
+#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
+#define QDIO_SBAL_SIZE 256
+
+#define QDIO_QETH_QFMT 0
+#define QDIO_ZFCP_QFMT 1
+#define QDIO_IQDIO_QFMT 2
+
+/**
+ * struct qdesfmt0 - queue descriptor, format 0
+ * @sliba: storage list information block address
+ * @sla: storage list address
+ * @slsba: storage list state block address
+ * @akey: access key for DLIB
+ * @bkey: access key for SL
+ * @ckey: access key for SBALs
+ * @dkey: access key for SLSB
+ */
+struct qdesfmt0 {
+ u64 sliba;
+ u64 sla;
+ u64 slsba;
+ u32 : 32;
+ u32 akey : 4;
+ u32 bkey : 4;
+ u32 ckey : 4;
+ u32 dkey : 4;
+ u32 : 16;
+} __attribute__ ((packed));
+
+#define QDR_AC_MULTI_BUFFER_ENABLE 0x01
+
+/**
+ * struct qdr - queue description record (QDR)
+ * @qfmt: queue format
+ * @pfmt: implementation dependent parameter format
+ * @ac: adapter characteristics
+ * @iqdcnt: input queue descriptor count
+ * @oqdcnt: output queue descriptor count
+ * @iqdsz: input queue descriptor size
+ * @oqdsz: output queue descriptor size
+ * @qiba: queue information block address
+ * @qkey: queue information block key
+ * @qdf0: queue descriptions
+ */
+struct qdr {
+ u32 qfmt : 8;
+ u32 pfmt : 8;
+ u32 : 8;
+ u32 ac : 8;
+ u32 : 8;
+ u32 iqdcnt : 8;
+ u32 : 8;
+ u32 oqdcnt : 8;
+ u32 : 8;
+ u32 iqdsz : 8;
+ u32 : 8;
+ u32 oqdsz : 8;
+ /* private: */
+ u32 res[9];
+ /* public: */
+ u64 qiba;
+ u32 : 32;
+ u32 qkey : 4;
+ u32 : 28;
+ struct qdesfmt0 qdf0[126];
+} __attribute__ ((packed, aligned(4096)));
+
+#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
+#define QIB_RFLAGS_ENABLE_QEBSM 0x80
+#define QIB_RFLAGS_ENABLE_DATA_DIV 0x02
+
+/**
+ * struct qib - queue information block (QIB)
+ * @qfmt: queue format
+ * @pfmt: implementation dependent parameter format
+ * @rflags: QEBSM
+ * @ac: adapter characteristics
+ * @isliba: absolute address of first input SLIB
+ * @osliba: absolute address of first output SLIB
+ * @ebcnam: adapter identifier in EBCDIC
+ * @parm: implementation dependent parameters
+ */
+struct qib {
+ u32 qfmt : 8;
+ u32 pfmt : 8;
+ u32 rflags : 8;
+ u32 ac : 8;
+ u32 : 32;
+ u64 isliba;
+ u64 osliba;
+ u32 : 32;
+ u32 : 32;
+ u8 ebcnam[8];
+ /* private: */
+ u8 res[88];
+ /* public: */
+ u8 parm[QDIO_MAX_BUFFERS_PER_Q];
+} __attribute__ ((packed, aligned(256)));
+
+/**
+ * struct slibe - storage list information block element (SLIBE)
+ * @parms: implementation dependent parameters
+ */
+struct slibe {
+ u64 parms;
+};
+
+/**
+ * struct qaob - queue asynchronous operation block
+ * @res0: reserved parameters
+ * @res1: reserved parameter
+ * @res2: reserved parameter
+ * @res3: reserved parameter
+ * @aorc: asynchronous operation return code
+ * @flags: internal flags
+ * @cbtbs: control block type
+ * @sb_count: number of storage blocks
+ * @sba: storage block element addresses
+ * @dcount: size of storage block elements
+ * @user0: user definable value
+ * @res4: reserved parameter
+ * @user1: user definable value
+ * @user2: user definable value
+ */
+struct qaob {
+ u64 res0[6];
+ u8 res1;
+ u8 res2;
+ u8 res3;
+ u8 aorc;
+ u8 flags;
+ u16 cbtbs;
+ u8 sb_count;
+ u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ u64 user0;
+ u64 res4[2];
+ u64 user1;
+ u64 user2;
+} __attribute__ ((packed, aligned(256)));
+
+/**
+ * struct slib - storage list information block (SLIB)
+ * @nsliba: next SLIB address (if any)
+ * @sla: SL address
+ * @slsba: SLSB address
+ * @slibe: SLIB elements
+ */
+struct slib {
+ u64 nsliba;
+ u64 sla;
+ u64 slsba;
+ /* private: */
+ u8 res[1000];
+ /* public: */
+ struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
+} __attribute__ ((packed, aligned(2048)));
+
+#define SBAL_EFLAGS_LAST_ENTRY 0x40
+#define SBAL_EFLAGS_CONTIGUOUS 0x20
+#define SBAL_EFLAGS_FIRST_FRAG 0x04
+#define SBAL_EFLAGS_MIDDLE_FRAG 0x08
+#define SBAL_EFLAGS_LAST_FRAG 0x0c
+#define SBAL_EFLAGS_MASK 0x6f
+
+#define SBAL_SFLAGS0_PCI_REQ 0x40
+#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20
+
+/* Awesome OpenFCP extensions */
+#define SBAL_SFLAGS0_TYPE_STATUS 0x00
+#define SBAL_SFLAGS0_TYPE_WRITE 0x08
+#define SBAL_SFLAGS0_TYPE_READ 0x10
+#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18
+#define SBAL_SFLAGS0_MORE_SBALS 0x04
+#define SBAL_SFLAGS0_COMMAND 0x02
+#define SBAL_SFLAGS0_LAST_SBAL 0x00
+#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND
+#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS
+#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
+
+/**
+ * struct qdio_buffer_element - SBAL entry
+ * @eflags: SBAL entry flags
+ * @scount: SBAL count
+ * @sflags: whole SBAL flags
+ * @length: length
+ * @addr: address
+ */
+struct qdio_buffer_element {
+ u8 eflags;
+ /* private: */
+ u8 res1;
+ /* public: */
+ u8 scount;
+ u8 sflags;
+ u32 length;
+ void *addr;
+} __attribute__ ((packed, aligned(16)));
+
+/**
+ * struct qdio_buffer - storage block address list (SBAL)
+ * @element: SBAL entries
+ */
+struct qdio_buffer {
+ struct qdio_buffer_element element[QDIO_MAX_ELEMENTS_PER_BUFFER];
+} __attribute__ ((packed, aligned(256)));
+
+/**
+ * struct sl_element - storage list entry
+ * @sbal: absolute SBAL address
+ */
+struct sl_element {
+ unsigned long sbal;
+} __attribute__ ((packed));
+
+/**
+ * struct sl - storage list (SL)
+ * @element: SL entries
+ */
+struct sl {
+ struct sl_element element[QDIO_MAX_BUFFERS_PER_Q];
+} __attribute__ ((packed, aligned(1024)));
+
+/**
+ * struct slsb - storage list state block (SLSB)
+ * @val: state per buffer
+ */
+struct slsb {
+ u8 val[QDIO_MAX_BUFFERS_PER_Q];
+} __attribute__ ((packed, aligned(256)));
+
+/**
+ * struct qdio_outbuf_state - SBAL related asynchronous operation information
+ * (for communication with upper layer programs)
+ * (only required for use with completion queues)
+ * @flags: flags indicating state of buffer
+ * @aob: pointer to QAOB used for the particular SBAL
+ * @user: pointer to upper layer program's state information related to SBAL
+ * (stored in user1 data of QAOB)
+ */
+struct qdio_outbuf_state {
+ u8 flags;
+ struct qaob *aob;
+ void *user;
+};
+
+#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
+#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
+
+#define CHSC_AC1_INITIATE_INPUTQ 0x80
+
+
+/* qdio adapter-characteristics-1 flag */
+#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
+#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
+#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
+#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
+#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
+#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
+#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
+
+#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080
+#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040
+#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
+#define CHSC_AC2_DATA_DIV_ENABLED 0x0002
+
+#define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000
+
+struct qdio_ssqd_desc {
+ u8 flags;
+ u8:8;
+ u16 sch;
+ u8 qfmt;
+ u8 parm;
+ u8 qdioac1;
+ u8 sch_class;
+ u8 pcnt;
+ u8 icnt;
+ u8:8;
+ u8 ocnt;
+ u8:8;
+ u8 mbccnt;
+ u16 qdioac2;
+ u64 sch_token;
+ u8 mro;
+ u8 mri;
+ u16 qdioac3;
+ u16:16;
+ u8:8;
+ u8 mmwc;
+} __attribute__ ((packed));
+
+/* params are: ccw_device, qdio_error, queue_number,
+ first element processed, number of elements processed, int_parm */
+typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
+ int, int, unsigned long);
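+
+/*
+ * Illustrative sketch (editor's addition): a handler matching the
+ * typedef above; all names are hypothetical, and QDIO_ERROR_FATAL is
+ * defined just below.
+ *
+ *	static void my_output_handler(struct ccw_device *cdev,
+ *				      unsigned int qdio_err, int queue,
+ *				      int first, int count,
+ *				      unsigned long int_parm)
+ *	{
+ *		if (qdio_err & QDIO_ERROR_FATAL)
+ *			return;
+ *		// process buffers first .. first + count - 1 of queue
+ *	}
+ */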
+
+/* qdio errors reported to the upper-layer program */
+#define QDIO_ERROR_ACTIVATE 0x0001
+#define QDIO_ERROR_GET_BUF_STATE 0x0002
+#define QDIO_ERROR_SET_BUF_STATE 0x0004
+#define QDIO_ERROR_SLSB_STATE 0x0100
+
+#define QDIO_ERROR_FATAL 0x00ff
+#define QDIO_ERROR_TEMPORARY 0xff00
+
+/* for qdio_cleanup */
+#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
+#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
+
+/**
+ * struct qdio_initialize - qdio initialization data
+ * @cdev: associated ccw device
+ * @q_format: queue format
+ * @adapter_name: name for the adapter
+ * @qib_param_field_format: format for qib_parm_field
+ * @qib_param_field: pointer to 128 bytes or NULL, if no param field
+ * @qib_rflags: rflags to set
+ * @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
+ * @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
+ * @no_input_qs: number of input queues
+ * @no_output_qs: number of output queues
+ * @input_handler: handler to be called for input queues
+ * @output_handler: handler to be called for output queues
+ * @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @int_parm: interruption parameter
+ * @input_sbal_addr_array: address of no_input_qs * 128 pointers
+ * @output_sbal_addr_array: address of no_output_qs * 128 pointers
+ * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
+ */
+struct qdio_initialize {
+ struct ccw_device *cdev;
+ unsigned char q_format;
+ unsigned char qdr_ac;
+ unsigned char adapter_name[8];
+ unsigned int qib_param_field_format;
+ unsigned char *qib_param_field;
+ unsigned char qib_rflags;
+ unsigned long *input_slib_elements;
+ unsigned long *output_slib_elements;
+ unsigned int no_input_qs;
+ unsigned int no_output_qs;
+ qdio_handler_t *input_handler;
+ qdio_handler_t *output_handler;
+ void (**queue_start_poll_array) (struct ccw_device *, int,
+ unsigned long);
+ int scan_threshold;
+ unsigned long int_parm;
+ void **input_sbal_addr_array;
+ void **output_sbal_addr_array;
+ struct qdio_outbuf_state *output_sbal_state_array;
+};
+
+/**
+ * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc()
+ * @l3_ipv6_addr: entry contains IPv6 address
+ * @l3_ipv4_addr: entry contains IPv4 address
+ * @l2_addr_lnid: entry contains MAC address and VLAN ID
+ */
+enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid};
+
+/**
+ * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc()
+ * @nit: Network interface token
+ * @addr: Address of one of the three types
+ *
+ * The struct is passed to the callback function by qdio_brinfo_desc()
+ */
+struct qdio_brinfo_entry_l3_ipv6 {
+ u64 nit;
+ struct { unsigned char _s6_addr[16]; } addr;
+} __packed;
+struct qdio_brinfo_entry_l3_ipv4 {
+ u64 nit;
+ struct { uint32_t _s_addr; } addr;
+} __packed;
+struct qdio_brinfo_entry_l2 {
+ u64 nit;
+ struct { u8 mac[6]; u16 lnid; } addr_lnid;
+} __packed;
+
+#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
+#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
+#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
+#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
+
+#define QDIO_FLAG_SYNC_INPUT 0x01
+#define QDIO_FLAG_SYNC_OUTPUT 0x02
+#define QDIO_FLAG_PCI_OUT 0x10
+
+int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
+void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
+void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count);
+
+extern int qdio_allocate(struct qdio_initialize *);
+extern int qdio_establish(struct qdio_initialize *);
+extern int qdio_activate(struct ccw_device *);
+extern void qdio_release_aob(struct qaob *);
+extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
+ unsigned int);
+extern int qdio_start_irq(struct ccw_device *, int);
+extern int qdio_stop_irq(struct ccw_device *, int);
+extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
+extern int qdio_shutdown(struct ccw_device *, int);
+extern int qdio_free(struct ccw_device *);
+extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
+extern int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv);
+
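+/*
+ * Illustrative sketch (not part of this patch): the usual bring-up
+ * sequence a device driver would follow with the interface above.
+ * my_qdio_bringup() is a hypothetical helper; init_data is assumed to
+ * be fully populated by the caller.
+ */
+static inline int my_qdio_bringup(struct ccw_device *cdev,
+ struct qdio_initialize *init_data)
+{
+ int rc;
+
+ rc = qdio_allocate(init_data); /* allocate qdio data structures */
+ if (rc)
+ return rc;
+ rc = qdio_establish(init_data); /* establish queues with the device */
+ if (rc)
+ goto out_free;
+ rc = qdio_activate(cdev); /* enable data transfer */
+ if (rc)
+ goto out_shutdown;
+ return 0;
+out_shutdown:
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+out_free:
+ qdio_free(cdev);
+ return rc;
+}
+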
+#endif /* __QDIO_H__ */
diff --git a/kernel/arch/s390/include/asm/reset.h b/kernel/arch/s390/include/asm/reset.h
new file mode 100644
index 000000000..72786067b
--- /dev/null
+++ b/kernel/arch/s390/include/asm/reset.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright IBM Corp. 2006
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_RESET_H
+#define _ASM_S390_RESET_H
+
+#include <linux/list.h>
+
+struct reset_call {
+ struct list_head list;
+ void (*fn)(void);
+};
+
+extern void register_reset_call(struct reset_call *reset);
+extern void unregister_reset_call(struct reset_call *reset);
+extern void s390_reset_system(void (*fn_pre)(void),
+ void (*fn_post)(void *), void *data);
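+
+/*
+ * Illustrative sketch (hypothetical driver code, not part of this
+ * patch): a driver that must quiesce its hardware before the system is
+ * reset registers a callback once during initialization.
+ */
+static void my_quiesce(void)
+{
+ /* stop DMA and disable interrupts on the device */
+}
+
+static struct reset_call my_reset_call = {
+ .fn = my_quiesce,
+};
+
+/* during init: register_reset_call(&my_reset_call); */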
+#endif /* _ASM_S390_RESET_H */
diff --git a/kernel/arch/s390/include/asm/runtime_instr.h b/kernel/arch/s390/include/asm/runtime_instr.h
new file mode 100644
index 000000000..402ad6df4
--- /dev/null
+++ b/kernel/arch/s390/include/asm/runtime_instr.h
@@ -0,0 +1,90 @@
+#ifndef _RUNTIME_INSTR_H
+#define _RUNTIME_INSTR_H
+
+#define S390_RUNTIME_INSTR_START 0x1
+#define S390_RUNTIME_INSTR_STOP 0x2
+
+struct runtime_instr_cb {
+ __u64 buf_current;
+ __u64 buf_origin;
+ __u64 buf_limit;
+
+ __u32 valid : 1;
+ __u32 pstate : 1;
+ __u32 pstate_set_buf : 1;
+ __u32 home_space : 1;
+ __u32 altered : 1;
+ __u32 : 3;
+ __u32 pstate_sample : 1;
+ __u32 sstate_sample : 1;
+ __u32 pstate_collect : 1;
+ __u32 sstate_collect : 1;
+ __u32 : 1;
+ __u32 halted_int : 1;
+ __u32 int_requested : 1;
+ __u32 buffer_full_int : 1;
+ __u32 key : 4;
+ __u32 : 9;
+ __u32 rgs : 3;
+
+ __u32 mode : 4;
+ __u32 next : 1;
+ __u32 mae : 1;
+ __u32 : 2;
+ __u32 call_type_br : 1;
+ __u32 return_type_br : 1;
+ __u32 other_type_br : 1;
+ __u32 bc_other_type : 1;
+ __u32 emit : 1;
+ __u32 tx_abort : 1;
+ __u32 : 2;
+ __u32 bp_xn : 1;
+ __u32 bp_xt : 1;
+ __u32 bp_ti : 1;
+ __u32 bp_ni : 1;
+ __u32 suppr_y : 1;
+ __u32 suppr_z : 1;
+
+ __u32 dc_miss_extra : 1;
+ __u32 lat_lev_ignore : 1;
+ __u32 ic_lat_lev : 4;
+ __u32 dc_lat_lev : 4;
+
+ __u64 reserved1;
+ __u64 scaling_factor;
+ __u64 rsic;
+ __u64 reserved2;
+} __packed __aligned(8);
+
+extern struct runtime_instr_cb runtime_instr_empty_cb;
+
+static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
+{
+ asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
+ : : "Q" (*cb));
+}
+
+static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
+{
+ asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
+ : "=Q" (*cb) : : "cc");
+}
+
+static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
+{
+ if (cb_prev)
+ store_runtime_instr_cb(cb_prev);
+}
+
+static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
+ struct runtime_instr_cb *cb_prev)
+{
+ if (cb_next)
+ load_runtime_instr_cb(cb_next);
+ else if (cb_prev)
+ load_runtime_instr_cb(&runtime_instr_empty_cb);
+}
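+
+/*
+ * Illustrative sketch (not part of this patch): a context switch would
+ * save the outgoing task's control block and load the incoming one; a
+ * task without a control block gets the empty block, which stops
+ * run-time instrumentation. my_switch_ri_cb() is a hypothetical helper
+ * built only from the two functions above.
+ */
+static inline void my_switch_ri_cb(struct runtime_instr_cb *cb_prev,
+ struct runtime_instr_cb *cb_next)
+{
+ save_ri_cb(cb_prev);
+ restore_ri_cb(cb_next, cb_prev);
+}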
+
+void exit_thread_runtime_instr(void);
+
+#endif /* _RUNTIME_INSTR_H */
diff --git a/kernel/arch/s390/include/asm/rwsem.h b/kernel/arch/s390/include/asm/rwsem.h
new file mode 100644
index 000000000..4b43ee7e6
--- /dev/null
+++ b/kernel/arch/s390/include/asm/rwsem.h
@@ -0,0 +1,237 @@
+#ifndef _S390_RWSEM_H
+#define _S390_RWSEM_H
+
+/*
+ * S390 version
+ * Copyright IBM Corp. 2002
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
+ */
+
+/*
+ * The MSW of the count is the negated number of active writers and waiting
+ * lockers, and the LSW is the total number of active locks
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
+ * uncontended lock. This can be determined because XADD returns the old value.
+ * Readers increment by 1 and see a positive value when uncontended, and a
+ * negative value if there are writers and (maybe) readers waiting (in which
+ * case the reader goes to sleep).
+ *
+ * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
+ * be extended to 65534 by manually checking the whole MSW rather than relying
+ * on the S flag.
+ *
+ * The value of ACTIVE_BIAS supports up to 65535 active processes.
+ *
+ * This should be totally fair - if anything is waiting, a process that wants a
+ * lock will go to the back of the queue. When the currently active lock is
+ * released, if there's a writer at the front of the queue, then that and only
+ * that will be woken up; if there's a bunch of consecutive readers at the
+ * front, then they'll all be woken up, but no other readers will be.
+ */
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
+#endif
+
+#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
+#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
+#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
+#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
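+
+/*
+ * Worked example of the count encoding (illustrative, not part of the
+ * original comment); values shown as unsigned 64-bit words:
+ *
+ * 0x0000000000000000 unlocked
+ * 0x0000000000000003 three active readers
+ * 0xffffffff00000001 one active writer, or one active reader with a
+ * writer waiting (MSW = -1, LSW = 1)
+ * 0xffffffff00000000 waiters present but no active locks; the release
+ * paths call rwsem_wake() in this case
+ */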
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ signed long old, new;
+
+ asm volatile(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " aghi %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
+ : "cc", "memory");
+ if (old < 0)
+ rwsem_down_read_failed(sem);
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ signed long old, new;
+
+ asm volatile(
+ " lg %0,%2\n"
+ "0: ltgr %1,%0\n"
+ " jm 1f\n"
+ " aghi %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b\n"
+ "1:"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
+ : "cc", "memory");
+ return old >= 0 ? 1 : 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ signed long old, new, tmp;
+
+ tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " ag %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "m" (tmp)
+ : "cc", "memory");
+ if (old != 0)
+ rwsem_down_write_failed(sem);
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ __down_write_nested(sem, 0);
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ signed long old;
+
+ asm volatile(
+ " lg %0,%1\n"
+ "0: ltgr %0,%0\n"
+ " jnz 1f\n"
+ " csg %0,%3,%1\n"
+ " jl 0b\n"
+ "1:"
+ : "=&d" (old), "=Q" (sem->count)
+ : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
+ : "cc", "memory");
+ return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ signed long old, new;
+
+ asm volatile(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " aghi %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
+ : "cc", "memory");
+ if (new < 0)
+ if ((new & RWSEM_ACTIVE_MASK) == 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ signed long old, new, tmp;
+
+ tmp = -RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " ag %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "m" (tmp)
+ : "cc", "memory");
+ if (new < 0)
+ if ((new & RWSEM_ACTIVE_MASK) == 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ signed long old, new, tmp;
+
+ tmp = -RWSEM_WAITING_BIAS;
+ asm volatile(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " ag %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "m" (tmp)
+ : "cc", "memory");
+ if (new > 1)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+{
+ signed long old, new;
+
+ asm volatile(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " agr %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "d" (delta)
+ : "cc", "memory");
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+{
+ signed long old, new;
+
+ asm volatile(
+ " lg %0,%2\n"
+ "0: lgr %1,%0\n"
+ " agr %1,%4\n"
+ " csg %0,%1,%2\n"
+ " jl 0b"
+ : "=&d" (old), "=&d" (new), "=Q" (sem->count)
+ : "Q" (sem->count), "d" (delta)
+ : "cc", "memory");
+ return new;
+}
+
+#endif /* _S390_RWSEM_H */
diff --git a/kernel/arch/s390/include/asm/schid.h b/kernel/arch/s390/include/asm/schid.h
new file mode 100644
index 000000000..40b47dfa9
--- /dev/null
+++ b/kernel/arch/s390/include/asm/schid.h
@@ -0,0 +1,21 @@
+#ifndef ASM_SCHID_H
+#define ASM_SCHID_H
+
+#include <linux/string.h>
+#include <uapi/asm/schid.h>
+
+/* Helper function for sane state of pre-allocated subchannel_id. */
+static inline void
+init_subchannel_id(struct subchannel_id *schid)
+{
+ memset(schid, 0, sizeof(struct subchannel_id));
+ schid->one = 1;
+}
+
+static inline int
+schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
+{
+ return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
+}
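+
+/*
+ * Illustrative usage (not part of this patch); sch_no is a field of
+ * struct subchannel_id from uapi/asm/schid.h:
+ *
+ * struct subchannel_id schid;
+ *
+ * init_subchannel_id(&schid);
+ * schid.sch_no = 1;
+ */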
+
+#endif /* ASM_SCHID_H */
diff --git a/kernel/arch/s390/include/asm/sclp.h b/kernel/arch/s390/include/asm/sclp.h
new file mode 100644
index 000000000..f1096bab5
--- /dev/null
+++ b/kernel/arch/s390/include/asm/sclp.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright IBM Corp. 2007
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_SCLP_H
+#define _ASM_S390_SCLP_H
+
+#include <linux/types.h>
+#include <asm/chpid.h>
+#include <asm/cpu.h>
+
+#define SCLP_CHP_INFO_MASK_SIZE 32
+
+struct sclp_chp_info {
+ u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+ u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+ u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+};
+
+#define LOADPARM_LEN 8
+
+struct sclp_ipl_info {
+ int is_valid;
+ int has_dump;
+ char loadparm[LOADPARM_LEN];
+};
+
+struct sclp_cpu_entry {
+ u8 core_id;
+ u8 reserved0[2];
+ u8 : 3;
+ u8 siif : 1;
+ u8 sigpif : 1;
+ u8 : 3;
+ u8 reserved2[10];
+ u8 type;
+ u8 reserved1;
+} __attribute__((packed));
+
+struct sclp_cpu_info {
+ unsigned int configured;
+ unsigned int standby;
+ unsigned int combined;
+ int has_cpu_type;
+ struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
+};
+
+int sclp_get_cpu_info(struct sclp_cpu_info *info);
+int sclp_cpu_configure(u8 cpu);
+int sclp_cpu_deconfigure(u8 cpu);
+unsigned long long sclp_get_rnmax(void);
+unsigned long long sclp_get_rzm(void);
+unsigned int sclp_get_max_cpu(void);
+unsigned int sclp_get_mtid(u8 cpu_type);
+unsigned int sclp_get_mtid_max(void);
+unsigned int sclp_get_mtid_prev(void);
+int sclp_sdias_blk_count(void);
+int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
+int sclp_chp_configure(struct chp_id chpid);
+int sclp_chp_deconfigure(struct chp_id chpid);
+int sclp_chp_read_info(struct sclp_chp_info *info);
+void sclp_get_ipl_info(struct sclp_ipl_info *info);
+bool __init sclp_has_linemode(void);
+bool __init sclp_has_vt220(void);
+bool sclp_has_sprp(void);
+int sclp_pci_configure(u32 fid);
+int sclp_pci_deconfigure(u32 fid);
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
+unsigned long sclp_get_hsa_size(void);
+void sclp_early_detect(void);
+int sclp_has_siif(void);
+int sclp_has_sigpif(void);
+unsigned int sclp_get_ibc(void);
+
+long _sclp_print_early(const char *);
+
+#endif /* _ASM_S390_SCLP_H */
diff --git a/kernel/arch/s390/include/asm/scsw.h b/kernel/arch/s390/include/asm/scsw.h
new file mode 100644
index 000000000..4af99cdad
--- /dev/null
+++ b/kernel/arch/s390/include/asm/scsw.h
@@ -0,0 +1,988 @@
+/*
+ * Helper functions for scsw access.
+ *
+ * Copyright IBM Corp. 2008, 2012
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_SCSW_H_
+#define _ASM_S390_SCSW_H_
+
+#include <linux/types.h>
+#include <asm/css_chars.h>
+#include <asm/cio.h>
+
+/**
+ * struct cmd_scsw - command-mode subchannel status word
+ * @key: subchannel key
+ * @sctl: suspend control
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @pfch: prefetch
+ * @isic: initial-status interruption control
+ * @alcc: address-limit checking control
+ * @ssi: suppress-suspended interruption
+ * @zcc: zero condition code
+ * @ectl: extended control
+ * @pno: path not operational
+ * @res: reserved
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @cpa: channel program address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @count: residual count
+ */
+struct cmd_scsw {
+ __u32 key : 4;
+ __u32 sctl : 1;
+ __u32 eswf : 1;
+ __u32 cc : 2;
+ __u32 fmt : 1;
+ __u32 pfch : 1;
+ __u32 isic : 1;
+ __u32 alcc : 1;
+ __u32 ssi : 1;
+ __u32 zcc : 1;
+ __u32 ectl : 1;
+ __u32 pno : 1;
+ __u32 res : 1;
+ __u32 fctl : 3;
+ __u32 actl : 7;
+ __u32 stctl : 5;
+ __u32 cpa;
+ __u32 dstat : 8;
+ __u32 cstat : 8;
+ __u32 count : 16;
+} __attribute__ ((packed));
+
+/**
+ * struct tm_scsw - transport-mode subchannel status word
+ * @key: subchannel key
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @x: IRB-format control
+ * @q: interrogate-complete
+ * @ectl: extended control
+ * @pno: path not operational
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @tcw: TCW address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @fcxs: FCX status
+ * @schxs: subchannel-extended status
+ */
+struct tm_scsw {
+ u32 key:4;
+ u32 :1;
+ u32 eswf:1;
+ u32 cc:2;
+ u32 fmt:3;
+ u32 x:1;
+ u32 q:1;
+ u32 :1;
+ u32 ectl:1;
+ u32 pno:1;
+ u32 :1;
+ u32 fctl:3;
+ u32 actl:7;
+ u32 stctl:5;
+ u32 tcw;
+ u32 dstat:8;
+ u32 cstat:8;
+ u32 fcxs:8;
+ u32 schxs:8;
+} __attribute__ ((packed));
+
+/**
+ * struct eadm_scsw - subchannel status word for eadm subchannels
+ * @key: subchannel key
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @ectl: extended control
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @aob: AOB address
+ * @dstat: device status
+ * @cstat: subchannel status
+ */
+struct eadm_scsw {
+ u32 key:4;
+ u32:1;
+ u32 eswf:1;
+ u32 cc:2;
+ u32:6;
+ u32 ectl:1;
+ u32:2;
+ u32 fctl:3;
+ u32 actl:7;
+ u32 stctl:5;
+ u32 aob;
+ u32 dstat:8;
+ u32 cstat:8;
+ u32:16;
+} __packed;
+
+/**
+ * union scsw - subchannel status word
+ * @cmd: command-mode SCSW
+ * @tm: transport-mode SCSW
+ * @eadm: eadm SCSW
+ */
+union scsw {
+ struct cmd_scsw cmd;
+ struct tm_scsw tm;
+ struct eadm_scsw eadm;
+} __packed;
+
+#define SCSW_FCTL_CLEAR_FUNC 0x1
+#define SCSW_FCTL_HALT_FUNC 0x2
+#define SCSW_FCTL_START_FUNC 0x4
+
+#define SCSW_ACTL_SUSPENDED 0x1
+#define SCSW_ACTL_DEVACT 0x2
+#define SCSW_ACTL_SCHACT 0x4
+#define SCSW_ACTL_CLEAR_PEND 0x8
+#define SCSW_ACTL_HALT_PEND 0x10
+#define SCSW_ACTL_START_PEND 0x20
+#define SCSW_ACTL_RESUME_PEND 0x40
+
+#define SCSW_STCTL_STATUS_PEND 0x1
+#define SCSW_STCTL_SEC_STATUS 0x2
+#define SCSW_STCTL_PRIM_STATUS 0x4
+#define SCSW_STCTL_INTER_STATUS 0x8
+#define SCSW_STCTL_ALERT_STATUS 0x10
+
+#define DEV_STAT_ATTENTION 0x80
+#define DEV_STAT_STAT_MOD 0x40
+#define DEV_STAT_CU_END 0x20
+#define DEV_STAT_BUSY 0x10
+#define DEV_STAT_CHN_END 0x08
+#define DEV_STAT_DEV_END 0x04
+#define DEV_STAT_UNIT_CHECK 0x02
+#define DEV_STAT_UNIT_EXCEP 0x01
+
+#define SCHN_STAT_PCI 0x80
+#define SCHN_STAT_INCORR_LEN 0x40
+#define SCHN_STAT_PROG_CHECK 0x20
+#define SCHN_STAT_PROT_CHECK 0x10
+#define SCHN_STAT_CHN_DATA_CHK 0x08
+#define SCHN_STAT_CHN_CTRL_CHK 0x04
+#define SCHN_STAT_INTF_CTRL_CHK 0x02
+#define SCHN_STAT_CHAIN_CHECK 0x01
+
+/*
+ * architectured values for first sense byte
+ */
+#define SNS0_CMD_REJECT 0x80
+#define SNS_CMD_REJECT SNS0_CMD_REJECT
+#define SNS0_INTERVENTION_REQ 0x40
+#define SNS0_BUS_OUT_CHECK 0x20
+#define SNS0_EQUIPMENT_CHECK 0x10
+#define SNS0_DATA_CHECK 0x08
+#define SNS0_OVERRUN 0x04
+#define SNS0_INCOMPL_DOMAIN 0x01
+
+/*
+ * architectured values for second sense byte
+ */
+#define SNS1_PERM_ERR 0x80
+#define SNS1_INV_TRACK_FORMAT 0x40
+#define SNS1_EOC 0x20
+#define SNS1_MESSAGE_TO_OPER 0x10
+#define SNS1_NO_REC_FOUND 0x08
+#define SNS1_FILE_PROTECTED 0x04
+#define SNS1_WRITE_INHIBITED 0x02
+#define SNS1_INPRECISE_END 0x01
+
+/*
+ * architectured values for third sense byte
+ */
+#define SNS2_REQ_INH_WRITE 0x80
+#define SNS2_CORRECTABLE 0x40
+#define SNS2_FIRST_LOG_ERR 0x20
+#define SNS2_ENV_DATA_PRESENT 0x10
+#define SNS2_INPRECISE_END 0x04
+
+/**
+ * scsw_is_tm - check for transport mode scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the specified scsw is a transport mode scsw, zero
+ * otherwise.
+ */
+static inline int scsw_is_tm(union scsw *scsw)
+{
+ return css_general_characteristics.fcx && (scsw->tm.x == 1);
+}
+
+/**
+ * scsw_key - return scsw key field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the key field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_key(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.key;
+ else
+ return scsw->cmd.key;
+}
+
+/**
+ * scsw_eswf - return scsw eswf field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the eswf field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_eswf(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.eswf;
+ else
+ return scsw->cmd.eswf;
+}
+
+/**
+ * scsw_cc - return scsw cc field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the cc field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_cc(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.cc;
+ else
+ return scsw->cmd.cc;
+}
+
+/**
+ * scsw_ectl - return scsw ectl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the ectl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_ectl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.ectl;
+ else
+ return scsw->cmd.ectl;
+}
+
+/**
+ * scsw_pno - return scsw pno field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the pno field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_pno(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.pno;
+ else
+ return scsw->cmd.pno;
+}
+
+/**
+ * scsw_fctl - return scsw fctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the fctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_fctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.fctl;
+ else
+ return scsw->cmd.fctl;
+}
+
+/**
+ * scsw_actl - return scsw actl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the actl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_actl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.actl;
+ else
+ return scsw->cmd.actl;
+}
+
+/**
+ * scsw_stctl - return scsw stctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the stctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_stctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.stctl;
+ else
+ return scsw->cmd.stctl;
+}
+
+/**
+ * scsw_dstat - return scsw dstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the dstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_dstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.dstat;
+ else
+ return scsw->cmd.dstat;
+}
+
+/**
+ * scsw_cstat - return scsw cstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the cstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+static inline u32 scsw_cstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.cstat;
+ else
+ return scsw->cmd.cstat;
+}
+
+/**
+ * scsw_cmd_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_key(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_sctl - check sctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the sctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_eswf(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_cmd_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_cc(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
+ (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_cmd_is_valid_fmt - check fmt field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fmt field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_fmt(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_pfch - check pfch field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pfch field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_pfch(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_isic - check isic field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the isic field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_isic(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_alcc - check alcc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the alcc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_alcc(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_ssi - check ssi field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ssi field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_ssi(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_cmd_is_valid_zcc - check zcc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the zcc field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
+ (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
+}
+
+/**
+ * scsw_cmd_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
+}
+
+/**
+ * scsw_cmd_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
+{
+ return (scsw->cmd.fctl != 0) &&
+ (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
+ ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
+}
+
+/**
+ * scsw_cmd_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_fctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1 */
+ return 1;
+}
+
+/**
+ * scsw_cmd_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_actl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1 */
+ return 1;
+}
+
+/**
+ * scsw_cmd_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_stctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1 */
+ return 1;
+}
+
+/**
+ * scsw_cmd_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_dstat(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->cmd.cc != 3);
+}
+
+/**
+ * scsw_cmd_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_cmd_is_valid_cstat(union scsw *scsw)
+{
+ return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->cmd.cc != 3);
+}
+
+/**
+ * scsw_tm_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_key(union scsw *scsw)
+{
+ return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
+}
+
+/**
+ * scsw_tm_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_eswf(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_tm_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_cc(union scsw *scsw)
+{
+ return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
+ (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
+}
+
+/**
+ * scsw_tm_is_valid_fmt - check fmt field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fmt field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_fmt(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_x - check x field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the x field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_x(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_q - check q field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the q field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_q(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
+}
+
+/**
+ * scsw_tm_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_pno(union scsw *scsw)
+{
+ return (scsw->tm.fctl != 0) &&
+ (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
+ ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
+ (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
+}
+
+/**
+ * scsw_tm_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_fctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1 */
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_actl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1 */
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_stctl(union scsw *scsw)
+{
+ /* Only valid if pmcw.dnv == 1 */
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_dstat(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->tm.cc != 3);
+}
+
+/**
+ * scsw_tm_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_cstat(union scsw *scsw)
+{
+ return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
+ (scsw->tm.cc != 3);
+}
+
+/**
+ * scsw_tm_is_valid_fcxs - check fcxs field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fcxs field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_fcxs(union scsw *scsw)
+{
+ return 1;
+}
+
+/**
+ * scsw_tm_is_valid_schxs - check schxs field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the schxs field of the specified transport mode scsw is
+ * valid, zero otherwise.
+ */
+static inline int scsw_tm_is_valid_schxs(union scsw *scsw)
+{
+ return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
+ SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_PROT_CHECK |
+ SCHN_STAT_CHN_DATA_CHK));
+}
+
+/**
+ * scsw_is_valid_actl - check actl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the actl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_actl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_actl(scsw);
+ else
+ return scsw_cmd_is_valid_actl(scsw);
+}
+
+/**
+ * scsw_is_valid_cc - check cc field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cc field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_cc(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_cc(scsw);
+ else
+ return scsw_cmd_is_valid_cc(scsw);
+}
+
+/**
+ * scsw_is_valid_cstat - check cstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the cstat field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_cstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_cstat(scsw);
+ else
+ return scsw_cmd_is_valid_cstat(scsw);
+}
+
+/**
+ * scsw_is_valid_dstat - check dstat field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the dstat field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_dstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_dstat(scsw);
+ else
+ return scsw_cmd_is_valid_dstat(scsw);
+}
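+
+/*
+ * Illustrative sketch (hypothetical helper, not part of this patch):
+ * an interrupt handler typically combines the accessors above to test
+ * for a successfully ended I/O, independent of the subchannel mode.
+ */
+static inline int my_io_completed(union scsw *scsw)
+{
+ return scsw_is_valid_dstat(scsw) &&
+ (scsw_dstat(scsw) & (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) ==
+ (DEV_STAT_CHN_END | DEV_STAT_DEV_END);
+}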
+
+/**
+ * scsw_is_valid_ectl - check ectl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the ectl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_ectl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_ectl(scsw);
+ else
+ return scsw_cmd_is_valid_ectl(scsw);
+}
+
+/**
+ * scsw_is_valid_eswf - check eswf field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the eswf field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_eswf(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_eswf(scsw);
+ else
+ return scsw_cmd_is_valid_eswf(scsw);
+}
+
+/**
+ * scsw_is_valid_fctl - check fctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the fctl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_fctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_fctl(scsw);
+ else
+ return scsw_cmd_is_valid_fctl(scsw);
+}
+
+/**
+ * scsw_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_key(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_key(scsw);
+ else
+ return scsw_cmd_is_valid_key(scsw);
+}
+
+/**
+ * scsw_is_valid_pno - check pno field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the pno field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_pno(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_pno(scsw);
+ else
+ return scsw_cmd_is_valid_pno(scsw);
+}
+
+/**
+ * scsw_is_valid_stctl - check stctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the stctl field of the specified scsw is valid,
+ * regardless of whether it is a transport mode or command mode scsw.
+ * Return zero if the field does not contain a valid value.
+ */
+static inline int scsw_is_valid_stctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_valid_stctl(scsw);
+ else
+ return scsw_cmd_is_valid_stctl(scsw);
+}
+
+/**
+ * scsw_cmd_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the command mode scsw indicates that the associated
+ * status condition is solicited, zero if it is unsolicited.
+ */
+static inline int scsw_cmd_is_solicited(union scsw *scsw)
+{
+ return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
+ (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
+}
+
+/**
+ * scsw_tm_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the transport mode scsw indicates that the associated
+ * status condition is solicited, zero if it is unsolicited.
+ */
+static inline int scsw_tm_is_solicited(union scsw *scsw)
+{
+ return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
+ (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
+}
+
+/**
+ * scsw_is_solicited - check for solicited scsw
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the transport or command mode scsw indicates that the
+ * associated status condition is solicited, zero if it is unsolicited.
+ */
+static inline int scsw_is_solicited(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw_tm_is_solicited(scsw);
+ else
+ return scsw_cmd_is_solicited(scsw);
+}
+
+#endif /* _ASM_S390_SCSW_H_ */
diff --git a/kernel/arch/s390/include/asm/seccomp.h b/kernel/arch/s390/include/asm/seccomp.h
new file mode 100644
index 000000000..781a9cf9b
--- /dev/null
+++ b/kernel/arch/s390/include/asm/seccomp.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_S390_SECCOMP_H
+#define _ASM_S390_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#endif /* _ASM_S390_SECCOMP_H */
diff --git a/kernel/arch/s390/include/asm/sections.h b/kernel/arch/s390/include/asm/sections.h
new file mode 100644
index 000000000..fbd9116eb
--- /dev/null
+++ b/kernel/arch/s390/include/asm/sections.h
@@ -0,0 +1,8 @@
+#ifndef _S390_SECTIONS_H
+#define _S390_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _eshared[], _ehead[];
+
+#endif
diff --git a/kernel/arch/s390/include/asm/segment.h b/kernel/arch/s390/include/asm/segment.h
new file mode 100644
index 000000000..8bfce3475
--- /dev/null
+++ b/kernel/arch/s390/include/asm/segment.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#endif
diff --git a/kernel/arch/s390/include/asm/serial.h b/kernel/arch/s390/include/asm/serial.h
new file mode 100644
index 000000000..5b3e48ef5
--- /dev/null
+++ b/kernel/arch/s390/include/asm/serial.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_S390_SERIAL_H
+#define _ASM_S390_SERIAL_H
+
+#define BASE_BAUD 0
+
+#endif /* _ASM_S390_SERIAL_H */
diff --git a/kernel/arch/s390/include/asm/setup.h b/kernel/arch/s390/include/asm/setup.h
new file mode 100644
index 000000000..b8ffc1bd0
--- /dev/null
+++ b/kernel/arch/s390/include/asm/setup.h
@@ -0,0 +1,119 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2010
+ */
+#ifndef _ASM_S390_SETUP_H
+#define _ASM_S390_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#define PARMAREA 0x10400
+
+#ifndef __ASSEMBLY__
+
+#include <asm/lowcore.h>
+#include <asm/types.h>
+
+#define IPL_DEVICE (*(unsigned long *) (0x10400))
+#define INITRD_START (*(unsigned long *) (0x10408))
+#define INITRD_SIZE (*(unsigned long *) (0x10410))
+#define OLDMEM_BASE (*(unsigned long *) (0x10418))
+#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
+#define COMMAND_LINE ((char *) (0x10480))
+
+extern int memory_end_set;
+extern unsigned long memory_end;
+extern unsigned long max_physmem_end;
+
+extern void detect_memory_memblock(void);
+
+/*
+ * Machine features detected in head.S
+ */
+
+#define MACHINE_FLAG_VM (1UL << 0)
+#define MACHINE_FLAG_IEEE (1UL << 1)
+#define MACHINE_FLAG_CSP (1UL << 2)
+#define MACHINE_FLAG_MVPG (1UL << 3)
+#define MACHINE_FLAG_DIAG44 (1UL << 4)
+#define MACHINE_FLAG_IDTE (1UL << 5)
+#define MACHINE_FLAG_DIAG9C (1UL << 6)
+#define MACHINE_FLAG_KVM (1UL << 8)
+#define MACHINE_FLAG_ESOP (1UL << 9)
+#define MACHINE_FLAG_EDAT1 (1UL << 10)
+#define MACHINE_FLAG_EDAT2 (1UL << 11)
+#define MACHINE_FLAG_LPAR (1UL << 12)
+#define MACHINE_FLAG_LPP (1UL << 13)
+#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
+#define MACHINE_FLAG_TE (1UL << 15)
+#define MACHINE_FLAG_TLB_LC (1UL << 17)
+#define MACHINE_FLAG_VX (1UL << 18)
+#define MACHINE_FLAG_CAD (1UL << 19)
+
+#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
+#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
+#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
+
+#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
+#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
+#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
+#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
+
+#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
+#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
+#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
+#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
+#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
+#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
+#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
+#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
+#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
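+
+/*
+ * Illustrative usage (not part of this patch): feature-dependent code
+ * paths are guarded by simple flag tests against the lowcore, e.g.
+ *
+ * if (MACHINE_HAS_EDAT1)
+ * setup_huge_pages();
+ *
+ * where setup_huge_pages() stands in for any hypothetical caller.
+ */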
+
+/*
+ * Console mode. Override with conmode=
+ */
+extern unsigned int console_mode;
+extern unsigned int console_devno;
+extern unsigned int console_irq;
+
+extern char vmhalt_cmd[];
+extern char vmpoff_cmd[];
+
+#define CONSOLE_IS_UNDEFINED (console_mode == 0)
+#define CONSOLE_IS_SCLP (console_mode == 1)
+#define CONSOLE_IS_3215 (console_mode == 2)
+#define CONSOLE_IS_3270 (console_mode == 3)
+#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0)
+#define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
+#define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
+
+#define NSS_NAME_SIZE 8
+extern char kernel_nss_name[];
+
+#ifdef CONFIG_PFAULT
+extern int pfault_init(void);
+extern void pfault_fini(void);
+#else /* CONFIG_PFAULT */
+#define pfault_init() ({-1;})
+#define pfault_fini() do { } while (0)
+#endif /* CONFIG_PFAULT */
+
+extern void cmma_init(void);
+
+extern void (*_machine_restart)(char *command);
+extern void (*_machine_halt)(void);
+extern void (*_machine_power_off)(void);
+
+#else /* __ASSEMBLY__ */
+
+#define IPL_DEVICE 0x10400
+#define INITRD_START 0x10408
+#define INITRD_SIZE 0x10410
+#define OLDMEM_BASE 0x10418
+#define OLDMEM_SIZE 0x10420
+#define COMMAND_LINE 0x10480
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_S390_SETUP_H */
diff --git a/kernel/arch/s390/include/asm/sfp-machine.h b/kernel/arch/s390/include/asm/sfp-machine.h
new file mode 100644
index 000000000..4e16aede4
--- /dev/null
+++ b/kernel/arch/s390/include/asm/sfp-machine.h
@@ -0,0 +1,142 @@
+/* Machine-dependent software floating-point definitions.
+ S/390 kernel version.
+ Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, write to the Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#ifndef _SFP_MACHINE_H
+#define _SFP_MACHINE_H
+
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned int
+#define _FP_WS_TYPE signed int
+#define _FP_I_TYPE int
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+
+/*
+ * If one NaN is signaling and the other is not,
+ * we choose that one, otherwise we choose X.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+/* Some assembly to speed things up. */
+#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) ({ \
+ unsigned int __r2 = (x2) + (y2); \
+ unsigned int __r1 = (x1); \
+ unsigned int __r0 = (x0); \
+ asm volatile( \
+ " alr %2,%3\n" \
+ " brc 12,0f\n" \
+ " lhi 0,1\n" \
+ " alr %1,0\n" \
+ " brc 12,0f\n" \
+ " alr %0,0\n" \
+ "0:" \
+ : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
+ : "d" (y0), "i" (1) : "cc", "0" ); \
+ asm volatile( \
+ " alr %1,%2\n" \
+ " brc 12,0f\n" \
+ " ahi %0,1\n" \
+ "0:" \
+ : "+&d" (__r2), "+&d" (__r1) \
+ : "d" (y1) : "cc"); \
+ (r2) = __r2; \
+ (r1) = __r1; \
+ (r0) = __r0; \
+})
+
+#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) ({ \
+ unsigned int __r2 = (x2) - (y2); \
+ unsigned int __r1 = (x1); \
+ unsigned int __r0 = (x0); \
+ asm volatile( \
+ " slr %2,%3\n" \
+ " brc 3,0f\n" \
+ " lhi 0,1\n" \
+ " slr %1,0\n" \
+ " brc 3,0f\n" \
+ " slr %0,0\n" \
+ "0:" \
+ : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
+ : "d" (y0) : "cc", "0"); \
+ asm volatile( \
+ " slr %1,%2\n" \
+ " brc 3,0f\n" \
+ " ahi %0,-1\n" \
+ "0:" \
+ : "+&d" (__r2), "+&d" (__r1) \
+ : "d" (y1) : "cc"); \
+ (r2) = __r2; \
+ (r1) = __r1; \
+ (r0) = __r0; \
+})
+
+#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE mode
+
+/* Exception flags. */
+#define FP_EX_INVALID 0x800000
+#define FP_EX_DIVZERO 0x400000
+#define FP_EX_OVERFLOW 0x200000
+#define FP_EX_UNDERFLOW 0x100000
+#define FP_EX_INEXACT 0x080000
+
+/* We write the results always */
+#define FP_INHIBIT_RESULTS 0
+
+#endif
diff --git a/kernel/arch/s390/include/asm/sfp-util.h b/kernel/arch/s390/include/asm/sfp-util.h
new file mode 100644
index 000000000..c8b7cf9d6
--- /dev/null
+++ b/kernel/arch/s390/include/asm/sfp-util.h
@@ -0,0 +1,67 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \
+ unsigned int __sh = (ah); \
+ unsigned int __sl = (al); \
+ asm volatile( \
+ " alr %1,%3\n" \
+ " brc 12,0f\n" \
+ " ahi %0,1\n" \
+ "0: alr %0,%2" \
+ : "+&d" (__sh), "+d" (__sl) \
+ : "d" (bh), "d" (bl) : "cc"); \
+ (sh) = __sh; \
+ (sl) = __sl; \
+})
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \
+ unsigned int __sh = (ah); \
+ unsigned int __sl = (al); \
+ asm volatile( \
+ " slr %1,%3\n" \
+ " brc 3,0f\n" \
+ " ahi %0,-1\n" \
+ "0: slr %0,%2" \
+ : "+&d" (__sh), "+d" (__sl) \
+ : "d" (bh), "d" (bl) : "cc"); \
+ (sh) = __sh; \
+ (sl) = __sl; \
+})
+
+/* a umul b = a mul b + (a >= 2^31 ? b << 32 : 0) + (b >= 2^31 ? a << 32 : 0) */
+#define umul_ppmm(wh, wl, u, v) ({ \
+ unsigned int __wh = u; \
+ unsigned int __wl = v; \
+ asm volatile( \
+ " ltr 1,%0\n" \
+ " mr 0,%1\n" \
+ " jnm 0f\n" \
+ " alr 0,%1\n" \
+ "0: ltr %1,%1\n" \
+ " jnm 1f\n" \
+ " alr 0,%0\n" \
+ "1: lr %0,0\n" \
+ " lr %1,1\n" \
+ : "+d" (__wh), "+d" (__wl) \
+ : : "0", "1", "cc"); \
+ wh = __wh; \
+ wl = __wl; \
+})
+
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { unsigned long __n; \
+ unsigned int __r, __d; \
+ __n = ((unsigned long)(n1) << 32) + n0; \
+ __d = (d); \
+ (q) = __n / __d; \
+ (r) = __n % __d; \
+ } while (0)
+
+#define UDIV_NEEDS_NORMALIZATION 0
+
+#define abort() BUG()
+
+#define __BYTE_ORDER __BIG_ENDIAN
diff --git a/kernel/arch/s390/include/asm/shmparam.h b/kernel/arch/s390/include/asm/shmparam.h
new file mode 100644
index 000000000..e98518273
--- /dev/null
+++ b/kernel/arch/s390/include/asm/shmparam.h
@@ -0,0 +1,11 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/shmparam.h"
+ */
+#ifndef _ASM_S390_SHMPARAM_H
+#define _ASM_S390_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_S390_SHMPARAM_H */
diff --git a/kernel/arch/s390/include/asm/signal.h b/kernel/arch/s390/include/asm/signal.h
new file mode 100644
index 000000000..abf9e5735
--- /dev/null
+++ b/kernel/arch/s390/include/asm/signal.h
@@ -0,0 +1,25 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/signal.h"
+ */
+#ifndef _ASMS390_SIGNAL_H
+#define _ASMS390_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+#include <asm/sigcontext.h>
+#define _NSIG _SIGCONTEXT_NSIG
+#define _NSIG_BPW _SIGCONTEXT_NSIG_BPW
+#define _NSIG_WORDS _SIGCONTEXT_NSIG_WORDS
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#define __ARCH_HAS_SA_RESTORER
+#endif
diff --git a/kernel/arch/s390/include/asm/sigp.h b/kernel/arch/s390/include/asm/sigp.h
new file mode 100644
index 000000000..ec60cf7fa
--- /dev/null
+++ b/kernel/arch/s390/include/asm/sigp.h
@@ -0,0 +1,57 @@
+#ifndef __S390_ASM_SIGP_H
+#define __S390_ASM_SIGP_H
+
+/* SIGP order codes */
+#define SIGP_SENSE 1
+#define SIGP_EXTERNAL_CALL 2
+#define SIGP_EMERGENCY_SIGNAL 3
+#define SIGP_START 4
+#define SIGP_STOP 5
+#define SIGP_RESTART 6
+#define SIGP_STOP_AND_STORE_STATUS 9
+#define SIGP_INITIAL_CPU_RESET 11
+#define SIGP_CPU_RESET 12
+#define SIGP_SET_PREFIX 13
+#define SIGP_STORE_STATUS_AT_ADDRESS 14
+#define SIGP_SET_ARCHITECTURE 18
+#define SIGP_COND_EMERGENCY_SIGNAL 19
+#define SIGP_SENSE_RUNNING 21
+#define SIGP_SET_MULTI_THREADING 22
+#define SIGP_STORE_ADDITIONAL_STATUS 23
+
+/* SIGP condition codes */
+#define SIGP_CC_ORDER_CODE_ACCEPTED 0
+#define SIGP_CC_STATUS_STORED 1
+#define SIGP_CC_BUSY 2
+#define SIGP_CC_NOT_OPERATIONAL 3
+
+/* SIGP cpu status bits */
+
+#define SIGP_STATUS_CHECK_STOP 0x00000010UL
+#define SIGP_STATUS_STOPPED 0x00000040UL
+#define SIGP_STATUS_EXT_CALL_PENDING 0x00000080UL
+#define SIGP_STATUS_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STATUS_INCORRECT_STATE 0x00000200UL
+#define SIGP_STATUS_NOT_RUNNING 0x00000400UL
+
+#ifndef __ASSEMBLY__
+
+static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
+ u32 *status)
+{
+ register unsigned long reg1 asm ("1") = parm;
+ int cc;
+
+ asm volatile(
+ " sigp %1,%2,0(%3)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
+ if (status && cc == 1)
+ *status = reg1;
+ return cc;
+}
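+
+/*
+ * Illustrative sketch (hypothetical helper, not part of this patch):
+ * sense a CPU and inspect the stored status word when the order
+ * returns condition code 1 ("status stored").
+ */
+static inline int my_cpu_stopped(u16 addr)
+{
+ u32 status = 0;
+
+ if (__pcpu_sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
+ return 0;
+ return (status & SIGP_STATUS_STOPPED) != 0;
+}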
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __S390_ASM_SIGP_H */
diff --git a/kernel/arch/s390/include/asm/smp.h b/kernel/arch/s390/include/asm/smp.h
new file mode 100644
index 000000000..b3bd0282d
--- /dev/null
+++ b/kernel/arch/s390/include/asm/smp.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright IBM Corp. 1999, 2012
+ * Author(s): Denis Joseph Barrow,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <asm/sigp.h>
+
+#ifdef CONFIG_SMP
+
+#include <asm/lowcore.h>
+
+#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
+
+extern struct mutex smp_cpu_state_mutex;
+extern unsigned int smp_cpu_mt_shift;
+extern unsigned int smp_cpu_mtid;
+
+extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+extern void smp_call_online_cpu(void (*func)(void *), void *);
+extern void smp_call_ipl_cpu(void (*func)(void *), void *);
+
+extern int smp_find_processor_id(u16 address);
+extern int smp_store_status(int cpu);
+extern int smp_vcpu_scheduled(int cpu);
+extern void smp_yield_cpu(int cpu);
+extern void smp_cpu_set_polarization(int cpu, int val);
+extern int smp_cpu_get_polarization(int cpu);
+extern void smp_fill_possible_mask(void);
+
+#else /* CONFIG_SMP */
+
+#define smp_cpu_mtid 0
+
+static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
+{
+ func(data);
+}
+
+static inline void smp_call_online_cpu(void (*func)(void *), void *data)
+{
+ func(data);
+}
+
+static inline int smp_find_processor_id(u16 address) { return 0; }
+static inline int smp_store_status(int cpu) { return 0; }
+static inline int smp_vcpu_scheduled(int cpu) { return 1; }
+static inline void smp_yield_cpu(int cpu) { }
+static inline void smp_fill_possible_mask(void) { }
+
+#endif /* CONFIG_SMP */
+
+static inline void smp_stop_cpu(void)
+{
+ u16 pcpu = stap();
+
+ for (;;) {
+ __pcpu_sigp(pcpu, SIGP_STOP, 0, NULL);
+ cpu_relax();
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int smp_rescan_cpus(void);
+extern void __noreturn cpu_die(void);
+extern void __cpu_die(unsigned int cpu);
+extern int __cpu_disable(void);
+#else
+static inline int smp_rescan_cpus(void) { return 0; }
+static inline void cpu_die(void) { }
+#endif
+
+#endif /* __ASM_SMP_H */
diff --git a/kernel/arch/s390/include/asm/sparsemem.h b/kernel/arch/s390/include/asm/sparsemem.h
new file mode 100644
index 000000000..487428b6d
--- /dev/null
+++ b/kernel/arch/s390/include/asm/sparsemem.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_S390_SPARSEMEM_H
+#define _ASM_S390_SPARSEMEM_H
+
+#define SECTION_SIZE_BITS 28
+#define MAX_PHYSMEM_BITS 46
+
+#endif /* _ASM_S390_SPARSEMEM_H */
diff --git a/kernel/arch/s390/include/asm/spinlock.h b/kernel/arch/s390/include/asm/spinlock.h
new file mode 100644
index 000000000..0e37cd041
--- /dev/null
+++ b/kernel/arch/s390/include/asm/spinlock.h
@@ -0,0 +1,280 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/spinlock.h"
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <linux/smp.h>
+
+#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+
+extern int spin_retry;
+
+static inline int
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
+{
+ return __sync_bool_compare_and_swap(lock, old, new);
+}
+
+/*
+ * Simple spin lock operations. There are two variants, one that clears
+ * IRQs on the local processor and one that does not.
+ *
+ * We make no fairness assumptions; fair locks have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
+void arch_lock_relax(unsigned int cpu);
+
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+
+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+ arch_lock_relax(lock->lock);
+}
+
+static inline u32 arch_spin_lockval(int cpu)
+{
+ return ~cpu;
+}
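+
+/*
+ * Note: the lock word holds the bitwise complement of the owning CPU
+ * number (taken from the lowcore), so even CPU 0 produces a non-zero,
+ * and therefore "locked", value; 0 always means unlocked.
+ */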
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.lock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
+{
+ return ACCESS_ONCE(lp->lock) != 0;
+}
+
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
+{
+ barrier();
+ return likely(arch_spin_value_unlocked(*lp) &&
+ _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lp)
+{
+ if (!arch_spin_trylock_once(lp))
+ arch_spin_lock_wait(lp);
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+ unsigned long flags)
+{
+ if (!arch_spin_trylock_once(lp))
+ arch_spin_lock_wait_flags(lp, flags);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+ if (!arch_spin_trylock_once(lp))
+ return arch_spin_trylock_retry(lp);
+ return 1;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
+{
+ typecheck(unsigned int, lp->lock);
+ asm volatile(
+ __ASM_BARRIER
+ "st %1,%0\n"
+ : "+Q" (lp->lock)
+ : "d" (0)
+ : "cc", "memory");
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ while (arch_spin_is_locked(lock))
+ arch_spin_relax(lock);
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * arch_read_can_lock - would arch_read_trylock() succeed?
+ * @x: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
+
+/**
+ * arch_write_can_lock - would arch_write_trylock() succeed?
+ * @x: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == 0)
+
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+static inline int arch_read_trylock_once(arch_rwlock_t *rw)
+{
+ unsigned int old = ACCESS_ONCE(rw->lock);
+ return likely((int) old >= 0 &&
+ _raw_compare_and_swap(&rw->lock, old, old + 1));
+}
+
+static inline int arch_write_trylock_once(arch_rwlock_t *rw)
+{
+ unsigned int old = ACCESS_ONCE(rw->lock);
+ return likely(old == 0 &&
+ _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+}
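+
+/*
+ * Lock word encoding used by the rwlock fast paths above: bit 31 (the
+ * sign bit, 0x80000000) is the writer bit, the lower bits count the
+ * readers. A negative value therefore means "write locked", zero means
+ * "free", and a positive value gives the number of active readers.
+ */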
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __RAW_OP_OR "lao"
+#define __RAW_OP_AND "lan"
+#define __RAW_OP_ADD "laa"
+
+#define __RAW_LOCK(ptr, op_val, op_string) \
+({ \
+ unsigned int old_val; \
+ \
+ typecheck(unsigned int *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ "bcr 14,0\n" \
+ : "=d" (old_val), "+Q" (*ptr) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#define __RAW_UNLOCK(ptr, op_val, op_string) \
+({ \
+ unsigned int old_val; \
+ \
+ typecheck(unsigned int *, ptr); \
+ asm volatile( \
+ "bcr 14,0\n" \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" (*ptr) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
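+
+/*
+ * Rough sketch of what the macros above expand to (assuming the z196
+ * interlocked-access facility): __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD)
+ * becomes approximately
+ *
+ *	laa	%r0,%r1,lock	# %r1 = 1: old = lock, lock += 1
+ *	bcr	14,0		# fast serialization (memory barrier)
+ *
+ * i.e. an atomic fetch-and-add followed by a barrier, with the fetched
+ * old value used to decide whether the slow path is needed.
+ */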
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int old;
+
+ old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
+ if ((int) old < 0)
+ _raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int old;
+
+ old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+ if (old != 0)
+ _raw_write_lock_wait(rw, old);
+ rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ rw->owner = 0;
+ __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ if (!arch_read_trylock_once(rw))
+ _raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int old;
+
+ do {
+ old = ACCESS_ONCE(rw->lock);
+ } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ if (!arch_write_trylock_once(rw))
+ _raw_write_lock_wait(rw);
+ rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ typecheck(unsigned int, rw->lock);
+
+ rw->owner = 0;
+ asm volatile(
+ __ASM_BARRIER
+ "st %1,%0\n"
+ : "+Q" (rw->lock)
+ : "d" (0)
+ : "cc", "memory");
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ if (!arch_read_trylock_once(rw))
+ return _raw_read_trylock_retry(rw);
+ return 1;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+ return 0;
+ rw->owner = SPINLOCK_LOCKVAL;
+ return 1;
+}
+
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+ arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+ arch_lock_relax(rw->owner);
+}
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/kernel/arch/s390/include/asm/spinlock_types.h b/kernel/arch/s390/include/asm/spinlock_types.h
new file mode 100644
index 000000000..d84b69392
--- /dev/null
+++ b/kernel/arch/s390/include/asm/spinlock_types.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ unsigned int lock;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
+
+typedef struct {
+ unsigned int lock;
+ unsigned int owner;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/kernel/arch/s390/include/asm/string.h b/kernel/arch/s390/include/asm/string.h
new file mode 100644
index 000000000..8662f5c8e
--- /dev/null
+++ b/kernel/arch/s390/include/asm/string.h
@@ -0,0 +1,142 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef _S390_STRING_H_
+#define _S390_STRING_H_
+
+#ifndef _LINUX_TYPES_H
+#include <linux/types.h>
+#endif
+
+#define __HAVE_ARCH_MEMCHR /* inline & arch function */
+#define __HAVE_ARCH_MEMCMP /* arch function */
+#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
+#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
+#define __HAVE_ARCH_STRCAT /* inline & arch function */
+#define __HAVE_ARCH_STRCMP /* arch function */
+#define __HAVE_ARCH_STRCPY /* inline & arch function */
+#define __HAVE_ARCH_STRLCAT /* arch function */
+#define __HAVE_ARCH_STRLCPY /* arch function */
+#define __HAVE_ARCH_STRLEN /* inline & arch function */
+#define __HAVE_ARCH_STRNCAT /* arch function */
+#define __HAVE_ARCH_STRNCPY /* arch function */
+#define __HAVE_ARCH_STRNLEN /* inline & arch function */
+#define __HAVE_ARCH_STRRCHR /* arch function */
+#define __HAVE_ARCH_STRSTR /* arch function */
+
+/* Prototypes for non-inlined arch strings functions. */
+extern int memcmp(const void *, const void *, size_t);
+extern void *memcpy(void *, const void *, size_t);
+extern void *memset(void *, int, size_t);
+extern int strcmp(const char *,const char *);
+extern size_t strlcat(char *, const char *, size_t);
+extern size_t strlcpy(char *, const char *, size_t);
+extern char *strncat(char *, const char *, size_t);
+extern char *strncpy(char *, const char *, size_t);
+extern char *strrchr(const char *, int);
+extern char *strstr(const char *, const char *);
+
+#undef __HAVE_ARCH_MEMMOVE
+#undef __HAVE_ARCH_STRCHR
+#undef __HAVE_ARCH_STRNCHR
+#undef __HAVE_ARCH_STRNCMP
+#undef __HAVE_ARCH_STRPBRK
+#undef __HAVE_ARCH_STRSEP
+#undef __HAVE_ARCH_STRSPN
+
+#if !defined(IN_ARCH_STRING_C)
+
+static inline void *memchr(const void *s, int c, size_t n)
+{
+ register int r0 asm("0") = (char) c;
+ const void *ret = s + n;
+
+ asm volatile(
+ "0: srst %0,%1\n"
+ " jo 0b\n"
+ " jl 1f\n"
+ " la %0,0\n"
+ "1:"
+ : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+ return (void *) ret;
+}
+
+static inline void *memscan(void *s, int c, size_t n)
+{
+ register int r0 asm("0") = (char) c;
+ const void *ret = s + n;
+
+ asm volatile(
+ "0: srst %0,%1\n"
+ " jo 0b\n"
+ : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+ return (void *) ret;
+}
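+
+/*
+ * memchr() and memscan() share the same SRST-based search loop; the
+ * difference is the "not found" result: memchr() returns NULL, while
+ * memscan() returns a pointer to the end of the searched area (s + n),
+ * matching the generic kernel semantics of the two functions.
+ */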
+
+static inline char *strcat(char *dst, const char *src)
+{
+ register int r0 asm("0") = 0;
+ unsigned long dummy;
+ char *ret = dst;
+
+ asm volatile(
+ "0: srst %0,%1\n"
+ " jo 0b\n"
+ "1: mvst %0,%2\n"
+ " jo 1b"
+ : "=&a" (dummy), "+a" (dst), "+a" (src)
+ : "d" (r0), "0" (0) : "cc", "memory" );
+ return ret;
+}
+
+static inline char *strcpy(char *dst, const char *src)
+{
+ register int r0 asm("0") = 0;
+ char *ret = dst;
+
+ asm volatile(
+ "0: mvst %0,%1\n"
+ " jo 0b"
+ : "+&a" (dst), "+&a" (src) : "d" (r0)
+ : "cc", "memory");
+ return ret;
+}
+
+static inline size_t strlen(const char *s)
+{
+ register unsigned long r0 asm("0") = 0;
+ const char *tmp = s;
+
+ asm volatile(
+ "0: srst %0,%1\n"
+ " jo 0b"
+ : "+d" (r0), "+a" (tmp) : : "cc");
+ return r0 - (unsigned long) s;
+}
+
+static inline size_t strnlen(const char *s, size_t n)
+{
+ register int r0 asm("0") = 0;
+ const char *tmp = s;
+ const char *end = s + n;
+
+ asm volatile(
+ "0: srst %0,%1\n"
+ " jo 0b"
+ : "+a" (end), "+a" (tmp) : "d" (r0) : "cc");
+ return end - s;
+}
+#else /* IN_ARCH_STRING_C */
+void *memchr(const void *s, int c, size_t n);
+void *memscan(void *s, int c, size_t n);
+char *strcat(char *dst, const char *src);
+char *strcpy(char *dst, const char *src);
+size_t strlen(const char *s);
+size_t strnlen(const char *s, size_t n);
+#endif /* !IN_ARCH_STRING_C */
+
+#endif /* __S390_STRING_H_ */
diff --git a/kernel/arch/s390/include/asm/switch_to.h b/kernel/arch/s390/include/asm/switch_to.h
new file mode 100644
index 000000000..d62e7a696
--- /dev/null
+++ b/kernel/arch/s390/include/asm/switch_to.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_SWITCH_TO_H
+#define __ASM_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+#include <asm/ptrace.h>
+
+extern struct task_struct *__switch_to(void *, void *);
+extern void update_cr_regs(struct task_struct *task);
+
+static inline int test_fp_ctl(u32 fpc)
+{
+ u32 orig_fpc;
+ int rc;
+
+ asm volatile(
+ " efpc %1\n"
+ " sfpc %2\n"
+ "0: sfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc), "=d" (orig_fpc)
+ : "d" (fpc), "0" (-EINVAL));
+ return rc;
+}
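+
+/*
+ * Hypothetical caller sketch: validate a user supplied floating point
+ * control word before it is loaded, e.g. in a ptrace or signal path:
+ *
+ *	if (test_fp_ctl(new_fpc))
+ *		return -EINVAL;
+ *	... new_fpc may now be loaded via restore_fp_ctl() ...
+ */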
+
+static inline void save_fp_ctl(u32 *fpc)
+{
+ asm volatile(
+ " stfpc %0\n"
+ : "+Q" (*fpc));
+}
+
+static inline int restore_fp_ctl(u32 *fpc)
+{
+ int rc;
+
+ asm volatile(
+ " lfpc %1\n"
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
+ return rc;
+}
+
+static inline void save_fp_regs(freg_t *fprs)
+{
+ asm volatile("std 0,%0" : "=Q" (fprs[0]));
+ asm volatile("std 2,%0" : "=Q" (fprs[2]));
+ asm volatile("std 4,%0" : "=Q" (fprs[4]));
+ asm volatile("std 6,%0" : "=Q" (fprs[6]));
+ asm volatile("std 1,%0" : "=Q" (fprs[1]));
+ asm volatile("std 3,%0" : "=Q" (fprs[3]));
+ asm volatile("std 5,%0" : "=Q" (fprs[5]));
+ asm volatile("std 7,%0" : "=Q" (fprs[7]));
+ asm volatile("std 8,%0" : "=Q" (fprs[8]));
+ asm volatile("std 9,%0" : "=Q" (fprs[9]));
+ asm volatile("std 10,%0" : "=Q" (fprs[10]));
+ asm volatile("std 11,%0" : "=Q" (fprs[11]));
+ asm volatile("std 12,%0" : "=Q" (fprs[12]));
+ asm volatile("std 13,%0" : "=Q" (fprs[13]));
+ asm volatile("std 14,%0" : "=Q" (fprs[14]));
+ asm volatile("std 15,%0" : "=Q" (fprs[15]));
+}
+
+static inline void restore_fp_regs(freg_t *fprs)
+{
+ asm volatile("ld 0,%0" : : "Q" (fprs[0]));
+ asm volatile("ld 2,%0" : : "Q" (fprs[2]));
+ asm volatile("ld 4,%0" : : "Q" (fprs[4]));
+ asm volatile("ld 6,%0" : : "Q" (fprs[6]));
+ asm volatile("ld 1,%0" : : "Q" (fprs[1]));
+ asm volatile("ld 3,%0" : : "Q" (fprs[3]));
+ asm volatile("ld 5,%0" : : "Q" (fprs[5]));
+ asm volatile("ld 7,%0" : : "Q" (fprs[7]));
+ asm volatile("ld 8,%0" : : "Q" (fprs[8]));
+ asm volatile("ld 9,%0" : : "Q" (fprs[9]));
+ asm volatile("ld 10,%0" : : "Q" (fprs[10]));
+ asm volatile("ld 11,%0" : : "Q" (fprs[11]));
+ asm volatile("ld 12,%0" : : "Q" (fprs[12]));
+ asm volatile("ld 13,%0" : : "Q" (fprs[13]));
+ asm volatile("ld 14,%0" : : "Q" (fprs[14]));
+ asm volatile("ld 15,%0" : : "Q" (fprs[15]));
+}
+
+static inline void save_vx_regs(__vector128 *vxrs)
+{
+ typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
+
+ asm volatile(
+ " la 1,%0\n"
+ " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
+ " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
+ : "=Q" (*(addrtype *) vxrs) : : "1");
+}
+
+static inline void save_vx_regs_safe(__vector128 *vxrs)
+{
+ unsigned long cr0, flags;
+
+ flags = arch_local_irq_save();
+ __ctl_store(cr0, 0, 0);
+ __ctl_set_bit(0, 17);
+ __ctl_set_bit(0, 18);
+ save_vx_regs(vxrs);
+ __ctl_load(cr0, 0, 0);
+ arch_local_irq_restore(flags);
+}
+
+static inline void restore_vx_regs(__vector128 *vxrs)
+{
+ typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
+
+ asm volatile(
+ " la 1,%0\n"
+ " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
+ " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
+ : : "Q" (*(addrtype *) vxrs) : "1");
+}
+
+static inline void save_fp_vx_regs(struct task_struct *task)
+{
+ if (task->thread.vxrs)
+ save_vx_regs(task->thread.vxrs);
+ else
+ save_fp_regs(task->thread.fp_regs.fprs);
+}
+
+static inline void restore_fp_vx_regs(struct task_struct *task)
+{
+ if (task->thread.vxrs)
+ restore_vx_regs(task->thread.vxrs);
+ else
+ restore_fp_regs(task->thread.fp_regs.fprs);
+}
+
+static inline void save_access_regs(unsigned int *acrs)
+{
+ typedef struct { int _[NUM_ACRS]; } acrstype;
+
+ asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
+}
+
+static inline void restore_access_regs(unsigned int *acrs)
+{
+ typedef struct { int _[NUM_ACRS]; } acrstype;
+
+ asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
+}
+
+#define switch_to(prev,next,last) do { \
+ if (prev->mm) { \
+ save_fp_ctl(&prev->thread.fp_regs.fpc); \
+ save_fp_vx_regs(prev); \
+ save_access_regs(&prev->thread.acrs[0]); \
+ save_ri_cb(prev->thread.ri_cb); \
+ } \
+ if (next->mm) { \
+ update_cr_regs(next); \
+ restore_fp_ctl(&next->thread.fp_regs.fpc); \
+ restore_fp_vx_regs(next); \
+ restore_access_regs(&next->thread.acrs[0]); \
+ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
+ } \
+ prev = __switch_to(prev,next); \
+} while (0)
+
+#endif /* __ASM_SWITCH_TO_H */
diff --git a/kernel/arch/s390/include/asm/syscall.h b/kernel/arch/s390/include/asm/syscall.h
new file mode 100644
index 000000000..6ba0bf928
--- /dev/null
+++ b/kernel/arch/s390/include/asm/syscall.h
@@ -0,0 +1,100 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+/*
+ * The syscall table always contains 32 bit pointers since we know that the
+ * address of the function to be called is (way) below 4GB. So the "int"
+ * type here is what we want and need for both 32 bit and 64 bit systems.
+ */
+extern const unsigned int sys_call_table[];
+extern const unsigned int sys_call_table_emu[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return test_pt_regs_flag(regs, PIF_SYSCALL) ?
+ (regs->int_code & 0xffff) : -1;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->gprs[2] = regs->orig_gpr2;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->gprs[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->gprs[2] = error ? error : val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ unsigned long mask = -1UL;
+
+ BUG_ON(i + n > 6);
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_31BIT))
+ mask = 0xffffffff;
+#endif
+ while (n-- > 0)
+ if (i + n > 0)
+ args[n] = regs->gprs[2 + i + n] & mask;
+ if (i == 0)
+ args[0] = regs->orig_gpr2 & mask;
+}
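+
+/*
+ * Worked example for the function above: for i == 0 and n == 6 the
+ * first argument is taken from the saved orig_gpr2 (gpr 2 itself is
+ * overwritten by the return value) and arguments 1..5 come from
+ * gprs 3..7; for a compat (31 bit) task each value is masked to its
+ * lower 32 bits.
+ */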
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ while (n-- > 0)
+ if (i + n > 0)
+ regs->gprs[2 + i + n] = args[n];
+ if (i == 0)
+ regs->orig_gpr2 = args[0];
+}
+
+static inline int syscall_get_arch(void)
+{
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(current, TIF_31BIT))
+ return AUDIT_ARCH_S390;
+#endif
+ return AUDIT_ARCH_S390X;
+}
+#endif /* _ASM_SYSCALL_H */
diff --git a/kernel/arch/s390/include/asm/sysinfo.h b/kernel/arch/s390/include/asm/sysinfo.h
new file mode 100644
index 000000000..f7054a892
--- /dev/null
+++ b/kernel/arch/s390/include/asm/sysinfo.h
@@ -0,0 +1,179 @@
+/*
+ * Definitions for the store system information (stsi) instruction.
+ *
+ * Copyright IBM Corp. 2001, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Ulrich Weigand <weigand@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __ASM_S390_SYSINFO_H
+#define __ASM_S390_SYSINFO_H
+
+#include <asm/bitsperlong.h>
+#include <linux/uuid.h>
+
+struct sysinfo_1_1_1 {
+ unsigned char p:1;
+ unsigned char :6;
+ unsigned char t:1;
+ unsigned char :8;
+ unsigned char ccr;
+ unsigned char cai;
+ char reserved_0[28];
+ char manufacturer[16];
+ char type[4];
+ char reserved_1[12];
+ char model_capacity[16];
+ char sequence[16];
+ char plant[4];
+ char model[16];
+ char model_perm_cap[16];
+ char model_temp_cap[16];
+ unsigned int model_cap_rating;
+ unsigned int model_perm_cap_rating;
+ unsigned int model_temp_cap_rating;
+ unsigned char typepct[5];
+ unsigned char reserved_2[3];
+ unsigned int ncr;
+ unsigned int npr;
+ unsigned int ntr;
+};
+
+struct sysinfo_1_2_1 {
+ char reserved_0[80];
+ char sequence[16];
+ char plant[4];
+ char reserved_1[2];
+ unsigned short cpu_address;
+};
+
+struct sysinfo_1_2_2 {
+ char format;
+ char reserved_0[1];
+ unsigned short acc_offset;
+ char reserved_1[20];
+ unsigned int nominal_cap;
+ unsigned int secondary_cap;
+ unsigned int capability;
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ unsigned short adjustment[0];
+};
+
+struct sysinfo_1_2_2_extension {
+ unsigned int alt_capability;
+ unsigned short alt_adjustment[0];
+};
+
+struct sysinfo_2_2_1 {
+ char reserved_0[80];
+ char sequence[16];
+ char plant[4];
+ unsigned short cpu_id;
+ unsigned short cpu_address;
+};
+
+struct sysinfo_2_2_2 {
+ char reserved_0[32];
+ unsigned short lpar_number;
+ char reserved_1;
+ unsigned char characteristics;
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ char name[8];
+ unsigned int caf;
+ char reserved_2[8];
+ unsigned char mt_installed;
+ unsigned char mt_general;
+ unsigned char mt_psmtid;
+ char reserved_3[5];
+ unsigned short cpus_dedicated;
+ unsigned short cpus_shared;
+};
+
+#define LPAR_CHAR_DEDICATED (1 << 7)
+#define LPAR_CHAR_SHARED (1 << 6)
+#define LPAR_CHAR_LIMITED (1 << 5)
+
+struct sysinfo_3_2_2 {
+ char reserved_0[31];
+ unsigned char :4;
+ unsigned char count:4;
+ struct {
+ char reserved_0[4];
+ unsigned short cpus_total;
+ unsigned short cpus_configured;
+ unsigned short cpus_standby;
+ unsigned short cpus_reserved;
+ char name[8];
+ unsigned int caf;
+ char cpi[16];
+ char reserved_1[3];
+ char ext_name_encoding;
+ unsigned int reserved_2;
+ uuid_be uuid;
+ } vm[8];
+ char reserved_3[1504];
+ char ext_names[8][256];
+};
+
+extern int topology_max_mnest;
+
+#define TOPOLOGY_CORE_BITS 64
+#define TOPOLOGY_NR_MAG 6
+
+struct topology_core {
+ unsigned char nl;
+ unsigned char reserved0[3];
+ unsigned char :6;
+ unsigned char pp:2;
+ unsigned char reserved1;
+ unsigned short origin;
+ unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG];
+};
+
+struct topology_container {
+ unsigned char nl;
+ unsigned char reserved[6];
+ unsigned char id;
+};
+
+union topology_entry {
+ unsigned char nl;
+ struct topology_core cpu;
+ struct topology_container container;
+};
+
+struct sysinfo_15_1_x {
+ unsigned char reserved0[2];
+ unsigned short length;
+ unsigned char mag[TOPOLOGY_NR_MAG];
+ unsigned char reserved1;
+ unsigned char mnest;
+ unsigned char reserved2[4];
+ union topology_entry tle[0];
+};
+
+int stsi(void *sysinfo, int fc, int sel1, int sel2);
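+
+/*
+ * Hypothetical usage sketch (the function code and selectors pick the
+ * sysinfo block): fetch the basic machine information, STSI 1.1.1,
+ * into a zeroed page and print the manufacturer field:
+ *
+ *	struct sysinfo_1_1_1 *info;
+ *
+ *	info = (void *) get_zeroed_page(GFP_KERNEL);
+ *	if (info && stsi(info, 1, 1, 1) == 0)
+ *		pr_info("manufacturer: %-16.16s\n", info->manufacturer);
+ */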
+
+/*
+ * Service level reporting interface.
+ */
+struct service_level {
+ struct list_head list;
+ void (*seq_print)(struct seq_file *, struct service_level *);
+};
+
+int register_service_level(struct service_level *);
+int unregister_service_level(struct service_level *);
+
+#endif /* __ASM_S390_SYSINFO_H */
diff --git a/kernel/arch/s390/include/asm/termios.h b/kernel/arch/s390/include/asm/termios.h
new file mode 100644
index 000000000..db028d17f
--- /dev/null
+++ b/kernel/arch/s390/include/asm/termios.h
@@ -0,0 +1,25 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/termios.h"
+ */
+#ifndef _S390_TERMIOS_H
+#define _S390_TERMIOS_H
+
+#include <uapi/asm/termios.h>
+
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+
+#include <asm-generic/termios-base.h>
+
+#endif /* _S390_TERMIOS_H */
diff --git a/kernel/arch/s390/include/asm/thread_info.h b/kernel/arch/s390/include/asm/thread_info.h
new file mode 100644
index 000000000..4c27ec764
--- /dev/null
+++ b/kernel/arch/s390/include/asm/thread_info.h
@@ -0,0 +1,99 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 2002, 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+/*
+ * Size of kernel stack for each process
+ */
+#define THREAD_ORDER 2
+#define ASYNC_ORDER 2
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
+
+#ifndef __ASSEMBLY__
+#include <asm/lowcore.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must also be changed
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long sys_call_table; /* System call table address */
+ unsigned int cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ unsigned int system_call;
+ __u64 user_timer;
+ __u64 system_timer;
+ unsigned long last_break; /* last breaking-event-address. */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ return (struct thread_info *) S390_lowcore.thread_info;
+}
+
+void arch_release_task_struct(struct task_struct *tsk);
+
+#define THREAD_SIZE_ORDER THREAD_ORDER
+
+#endif
+
+/*
+ * thread information flags bit numbers
+ */
+#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_SECCOMP 5 /* secure computing */
+#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_UPROBE 7 /* breakpointed or single-stepping */
+#define TIF_31BIT 16 /* 32bit process */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
+#define TIF_SINGLE_STEP 19 /* This task is single stepped */
+#define TIF_BLOCK_STEP 20 /* This task is block stepped */
+#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
+
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
+#define _TIF_31BIT (1<<TIF_31BIT)
+#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
+
+#define is_32bit_task() (test_thread_flag(TIF_31BIT))
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/kernel/arch/s390/include/asm/timex.h b/kernel/arch/s390/include/asm/timex.h
new file mode 100644
index 000000000..98eb2a579
--- /dev/null
+++ b/kernel/arch/s390/include/asm/timex.h
@@ -0,0 +1,163 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999
+ *
+ * Derived from "include/asm-i386/timex.h"
+ * Copyright (C) 1992, Linus Torvalds
+ */
+
+#ifndef _ASM_S390_TIMEX_H
+#define _ASM_S390_TIMEX_H
+
+#include <asm/lowcore.h>
+
+/* The value of the TOD clock for 1.1.1970. */
+#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+
+/* Inline functions for clock register access. */
+static inline int set_tod_clock(__u64 time)
+{
+ int cc;
+
+ asm volatile(
+ " sck %1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc) : "Q" (time) : "cc");
+ return cc;
+}
+
+static inline int store_tod_clock(__u64 *time)
+{
+ int cc;
+
+ asm volatile(
+ " stck %1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "=Q" (*time) : : "cc");
+ return cc;
+}
+
+static inline void set_clock_comparator(__u64 time)
+{
+ asm volatile("sckc %0" : : "Q" (time));
+}
+
+static inline void store_clock_comparator(__u64 *time)
+{
+ asm volatile("stckc %0" : "=Q" (*time));
+}
+
+void clock_comparator_work(void);
+
+static inline unsigned long long local_tick_disable(void)
+{
+ unsigned long long old;
+
+ old = S390_lowcore.clock_comparator;
+ S390_lowcore.clock_comparator = -1ULL;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ return old;
+}
+
+static inline void local_tick_enable(unsigned long long comp)
+{
+ S390_lowcore.clock_comparator = comp;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+}
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
+
+typedef unsigned long long cycles_t;
+
+static inline void get_tod_clock_ext(char *clk)
+{
+ typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
+
+ asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
+}
+
+static inline unsigned long long get_tod_clock(void)
+{
+ unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
+ get_tod_clock_ext(clk);
+ return *((unsigned long long *)&clk[1]);
+}
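+
+/*
+ * Note: stcke stores a 16 byte value whose first byte is the epoch
+ * index; bytes 1-8 hold the usual 64 bit TOD clock value, which is why
+ * get_tod_clock() reads from &clk[1].
+ */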
+
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ unsigned long long clk;
+
+ asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+ return clk;
+#else
+ return get_tod_clock();
+#endif
+}
+
+static inline cycles_t get_cycles(void)
+{
+ return (cycles_t) get_tod_clock() >> 2;
+}
+
+int get_sync_clock(unsigned long long *clock);
+void init_cpu_timer(void);
+unsigned long long monotonic_clock(void);
+
+void tod_to_timeval(__u64, struct timespec *);
+
+static inline
+void stck_to_timespec(unsigned long long stck, struct timespec *ts)
+{
+ tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
+}
+
+extern u64 sched_clock_base_cc;
+
+/**
+ * get_tod_clock_monotonic - returns current time in clock rate units
+ *
+ * The clock value and sched_clock_base_cc get changed via stop_machine.
+ * Therefore preemption must be disabled when calling this function,
+ * otherwise the returned value is not guaranteed to be monotonic.
+ */
+static inline unsigned long long get_tod_clock_monotonic(void)
+{
+ return get_tod_clock() - sched_clock_base_cc;
+}
+
+/**
+ * tod_to_ns - convert a TOD format value to nanoseconds
+ * @todval: TOD format value to be converted
+ * Returns: number of nanoseconds that correspond to the TOD format value
+ *
+ * Converting a 64 bit TOD format value to nanoseconds means that the value
+ * must be divided by 4.096. In order to achieve that we multiply with 125
+ * and divide by 512:
+ *
+ * ns = (todval * 125) >> 9;
+ *
+ * In order to avoid an overflow with the multiplication we can rewrite this.
+ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
+ * we end up with
+ *
+ * ns = ((2^32 * th + tl) * 125 ) >> 9;
+ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
+ *
+ */
+static inline unsigned long long tod_to_ns(unsigned long long todval)
+{
+ unsigned long long ns;
+
+ ns = ((todval >> 32) << 23) * 125;
+ ns += ((todval & 0xffffffff) * 125) >> 9;
+ return ns;
+}
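+
+/*
+ * Quick sanity check of the conversion above: 4096 TOD units are one
+ * microsecond, and indeed tod_to_ns(4096) = (4096 * 125) >> 9 =
+ * 512000 >> 9 = 1000 ns.
+ */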
+
+#endif
diff --git a/kernel/arch/s390/include/asm/tlb.h b/kernel/arch/s390/include/asm/tlb.h
new file mode 100644
index 000000000..7a92e69c5
--- /dev/null
+++ b/kernel/arch/s390/include/asm/tlb.h
@@ -0,0 +1,148 @@
+#ifndef _S390_TLB_H
+#define _S390_TLB_H
+
+/*
+ * TLB flushing on s390 is complicated. The following requirement
+ * from the principles of operation is the most arduous:
+ *
+ * "A valid table entry must not be changed while it is attached
+ * to any CPU and may be used for translation by that CPU except to
+ * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
+ * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
+ * table entry, or (3) make a change by means of a COMPARE AND SWAP
+ * AND PURGE instruction that purges the TLB."
+ *
+ * The modification of a pte of an active mm struct therefore is
+ * a two step process: i) invalidate the pte, ii) store the new pte.
+ * This is true for the page protection bit as well.
+ * The only possible optimization is to flush at the beginning of
+ * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
+ *
+ * Pages used for the page tables are a different story. FIXME: more
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <asm/processor.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+struct mmu_gather {
+ struct mm_struct *mm;
+ struct mmu_table_batch *batch;
+ unsigned int fullmm;
+ unsigned long start, end;
+};
+
+struct mmu_table_batch {
+ struct rcu_head rcu;
+ unsigned int nr;
+ void *tables[0];
+};
+
+#define MAX_TABLE_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+static inline void tlb_gather_mmu(struct mmu_gather *tlb,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ tlb->mm = mm;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
+ tlb->batch = NULL;
+}
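+
+/*
+ * Note on the fullmm test above: start | (end + 1) is zero only for
+ * start == 0 and end == -1, i.e. the full address space teardown case
+ * used when an entire mm goes away.
+ */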
+
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+ __tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+ tlb_table_flush(tlb);
+}
+
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb_flush_mmu_free(tlb);
+}
+
+static inline void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end)
+{
+ tlb_flush_mmu(tlb);
+}
+
+/*
+ * Release the page cache reference for a pte removed by
+ * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
+ * has already been freed, so just do free_page_and_swap_cache.
+ */
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ free_page_and_swap_cache(page);
+ return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ free_page_and_swap_cache(page);
+}
+
+/*
+ * pte_free_tlb frees a pte table and clears the CRSTE for the
+ * page table from the tlb.
+ */
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long address)
+{
+ page_table_free_rcu(tlb, (unsigned long *) pte, address);
+}
+
+/*
+ * pmd_free_tlb frees a pmd table and clears the CRSTE for the
+ * segment table entry from the tlb.
+ * If the mm uses a two level page table the single pmd is freed
+ * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
+ * to avoid the double free of the pmd in this case.
+ */
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
+{
+ if (tlb->mm->context.asce_limit <= (1UL << 31))
+ return;
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ tlb_remove_table(tlb, pmd);
+}
+
+/*
+ * pud_free_tlb frees a pud table and clears the CRSTE for the
+ * region third table entry from the tlb.
+ * If the mm uses a three level page table the single pud is freed
+ * as the pgd. pud_free_tlb checks the asce_limit against 4TB
+ * to avoid the double free of the pud in this case.
+ */
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
+{
+ if (tlb->mm->context.asce_limit <= (1UL << 42))
+ return;
+ tlb_remove_table(tlb, pud);
+}
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
+#define tlb_migrate_finish(mm) do { } while (0)
+
+#endif /* _S390_TLB_H */
diff --git a/kernel/arch/s390/include/asm/tlbflush.h b/kernel/arch/s390/include/asm/tlbflush.h
new file mode 100644
index 000000000..ca148f7c3
--- /dev/null
+++ b/kernel/arch/s390/include/asm/tlbflush.h
@@ -0,0 +1,204 @@
+#ifndef _S390_TLBFLUSH_H
+#define _S390_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Flush all TLB entries on the local CPU.
+ */
+static inline void __tlb_flush_local(void)
+{
+ asm volatile("ptlb" : : : "memory");
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on all CPUs
+ */
+static inline void __tlb_flush_idte(unsigned long asce)
+{
+ /* Global TLB flush for the mm */
+ asm volatile(
+ " .insn rrf,0xb98e0000,0,%0,%1,0"
+ : : "a" (2048), "a" (asce) : "cc");
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on the local CPU
+ */
+static inline void __tlb_flush_idte_local(unsigned long asce)
+{
+ /* Local TLB flush for the mm */
+ asm volatile(
+ " .insn rrf,0xb98e0000,0,%0,%1,1"
+ : : "a" (2048), "a" (asce) : "cc");
+}
+
+#ifdef CONFIG_SMP
+void smp_ptlb_all(void);
+
+/*
+ * Flush all TLB entries on all CPUs.
+ */
+static inline void __tlb_flush_global(void)
+{
+ register unsigned long reg2 asm("2");
+ register unsigned long reg3 asm("3");
+ register unsigned long reg4 asm("4");
+ long dummy;
+
+ dummy = 0;
+ reg2 = reg3 = 0;
+ reg4 = ((unsigned long) &dummy) + 1;
+ asm volatile(
+ " csp %0,%2"
+ : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
+}
+
+/*
+ * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+ * this implicates multiple ASCEs!).
+ */
+static inline void __tlb_flush_full(struct mm_struct *mm)
+{
+ preempt_disable();
+ atomic_add(0x10000, &mm->context.attach_count);
+ if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+ /* Local TLB flush */
+ __tlb_flush_local();
+ } else {
+ /* Global TLB flush */
+ __tlb_flush_global();
+ /* Reset TLB flush mask */
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_copy(mm_cpumask(mm),
+ &mm->context.cpu_attach_mask);
+ }
+ atomic_sub(0x10000, &mm->context.attach_count);
+ preempt_enable();
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on all CPUs.
+ */
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+{
+ int active, count;
+
+ preempt_disable();
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+ __tlb_flush_idte_local(asce);
+ } else {
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte(asce);
+ else
+ __tlb_flush_global();
+ /* Reset TLB flush mask */
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_copy(mm_cpumask(mm),
+ &mm->context.cpu_attach_mask);
+ }
+ atomic_sub(0x10000, &mm->context.attach_count);
+ preempt_enable();
+}
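+
+/*
+ * The attach_count arithmetic above encodes two counters in one
+ * atomic_t: the lower 16 bits count the CPUs the mm is attached to,
+ * while the 0x10000 added around the flush marks a flush in progress
+ * for other code paths. The local IDTE shortcut is only taken when no
+ * other CPU has the mm attached and its cpumask contains just the
+ * current CPU.
+ */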
+
+static inline void __tlb_flush_kernel(void)
+{
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte((unsigned long) init_mm.pgd |
+ init_mm.context.asce_bits);
+ else
+ __tlb_flush_global();
+}
+#else
+#define __tlb_flush_global() __tlb_flush_local()
+#define __tlb_flush_full(mm) __tlb_flush_local()
+
+/*
+ * Flush TLB entries for a specific ASCE on all CPUs.
+ */
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+{
+ if (MACHINE_HAS_TLB_LC)
+ __tlb_flush_idte_local(asce);
+ else
+ __tlb_flush_local();
+}
+
+static inline void __tlb_flush_kernel(void)
+{
+ if (MACHINE_HAS_TLB_LC)
+ __tlb_flush_idte_local((unsigned long) init_mm.pgd |
+ init_mm.context.asce_bits);
+ else
+ __tlb_flush_local();
+}
+#endif
+
+static inline void __tlb_flush_mm(struct mm_struct * mm)
+{
+ /*
+ * If the machine has IDTE, prefer a per mm flush on all
+ * cpus over a local flush, even if the mm only ran on the
+ * local cpu.
+ */
+ if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
+ __tlb_flush_asce(mm, (unsigned long) mm->pgd |
+ mm->context.asce_bits);
+ else
+ __tlb_flush_full(mm);
+}
+
+static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
+{
+ if (mm->context.flush_mm) {
+ __tlb_flush_mm(mm);
+ mm->context.flush_mm = 0;
+ }
+}
+
+/*
+ * TLB flushing:
+ * flush_tlb() - flushes the current mm struct TLBs
+ * flush_tlb_all() - flushes all processes TLBs
+ * flush_tlb_mm(mm) - flushes the specified mm context TLB's
+ * flush_tlb_page(vma, vmaddr) - flushes one page
+ * flush_tlb_range(vma, start, end) - flushes a range of pages
+ * flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
+ */
+
+/*
+ * flush_tlb_mm goes together with ptep_set_wrprotect for the
+ * copy_page_range operation and flush_tlb_range is related to
+ * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
+ * ptep_get_and_clear do not flush the TLBs directly if the mm has
+ * only one user. At the end of the update the flush_tlb_mm and
+ * flush_tlb_range functions need to do the flush.
+ */
+#define flush_tlb() do { } while (0)
+#define flush_tlb_all() do { } while (0)
+#define flush_tlb_page(vma, addr) do { } while (0)
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ __tlb_flush_mm_lazy(mm);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ __tlb_flush_mm_lazy(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ __tlb_flush_kernel();
+}
+
+#endif /* _S390_TLBFLUSH_H */
diff --git a/kernel/arch/s390/include/asm/topology.h b/kernel/arch/s390/include/asm/topology.h
new file mode 100644
index 000000000..b1453a2ae
--- /dev/null
+++ b/kernel/arch/s390/include/asm/topology.h
@@ -0,0 +1,56 @@
+#ifndef _ASM_S390_TOPOLOGY_H
+#define _ASM_S390_TOPOLOGY_H
+
+#include <linux/cpumask.h>
+
+struct sysinfo_15_1_x;
+struct cpu;
+
+#ifdef CONFIG_SCHED_BOOK
+
+struct cpu_topology_s390 {
+ unsigned short thread_id;
+ unsigned short core_id;
+ unsigned short socket_id;
+ unsigned short book_id;
+ cpumask_t thread_mask;
+ cpumask_t core_mask;
+ cpumask_t book_mask;
+};
+
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
+
+#define mc_capable() 1
+
+int topology_cpu_init(struct cpu *);
+int topology_set_cpu_management(int fc);
+void topology_schedule_update(void);
+void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#else /* CONFIG_SCHED_BOOK */
+
+static inline void topology_schedule_update(void) { }
+static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
+
+#endif /* CONFIG_SCHED_BOOK */
+
+#define POLARIZATION_UNKNOWN (-1)
+#define POLARIZATION_HRZ (0)
+#define POLARIZATION_VL (1)
+#define POLARIZATION_VM (2)
+#define POLARIZATION_VH (3)
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/kernel/arch/s390/include/asm/types.h b/kernel/arch/s390/include/asm/types.h
new file mode 100644
index 000000000..6740f4f97
--- /dev/null
+++ b/kernel/arch/s390/include/asm/types.h
@@ -0,0 +1,11 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/types.h"
+ */
+#ifndef _S390_TYPES_H
+#define _S390_TYPES_H
+
+#include <uapi/asm/types.h>
+
+#endif /* _S390_TYPES_H */
diff --git a/kernel/arch/s390/include/asm/uaccess.h b/kernel/arch/s390/include/asm/uaccess.h
new file mode 100644
index 000000000..9dd4cc47d
--- /dev/null
+++ b/kernel/arch/s390/include/asm/uaccess.h
@@ -0,0 +1,382 @@
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Hartmut Penner (hp@de.ibm.com),
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/uaccess.h"
+ */
+#ifndef __S390_UACCESS_H
+#define __S390_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/ctl_reg.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
+
+
+#define KERNEL_DS MAKE_MM_SEG(0)
+#define USER_DS MAKE_MM_SEG(1)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.mm_segment)
+
+#define set_fs(x) \
+({ \
+ unsigned long __pto; \
+ current->thread.mm_segment = (x); \
+ __pto = current->thread.mm_segment.ar4 ? \
+ S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
+ __ctl_load(__pto, 7, 7); \
+})
+
+#define segment_eq(a,b) ((a).ar4 == (b).ar4)
+
+static inline int __range_ok(unsigned long addr, unsigned long size)
+{
+ return 1;
+}
+
+#define __access_ok(addr, size) \
+({ \
+ __chk_user_ptr(addr); \
+ __range_ok((unsigned long)(addr), (size)); \
+})
+
+#define access_ok(type, addr, size) __access_ok(addr, size)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ int insn, fixup;
+};
+
+static inline unsigned long extable_insn(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->insn + x->insn;
+}
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->fixup + x->fixup;
+}
+
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to: Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from user space to kernel space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long __must_check __copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+
+#define __put_get_user_asm(to, from, size, spec) \
+({ \
+ register unsigned long __reg0 asm("0") = spec; \
+ int __rc; \
+ \
+ asm volatile( \
+ "0: mvcos %1,%3,%2\n" \
+ "1: xr %0,%0\n" \
+ "2:\n" \
+ ".pushsection .fixup, \"ax\"\n" \
+ "3: lhi %0,%5\n" \
+ " jg 2b\n" \
+ ".popsection\n" \
+ EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
+ : "=d" (__rc), "=Q" (*(to)) \
+ : "d" (size), "Q" (*(from)), \
+ "d" (__reg0), "K" (-EFAULT) \
+ : "cc"); \
+ __rc; \
+})
+
+#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
+#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)
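+
+/*
+ * The magic constants loaded into register 0 above are the mvcos
+ * operand access specification: 0x81 marks the source operand as a
+ * user space (secondary ASCE) access, which is what __get_user_fn
+ * needs, while the same pattern shifted up two bytes, 0x810000, marks
+ * the destination operand instead, for __put_user_fn. Note that
+ * set_fs() above loads the user ASCE into control register 7, the
+ * secondary space register.
+ */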
+
+#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
+
+static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+{
+ size = __copy_to_user(ptr, x, size);
+ return size ? -EFAULT : 0;
+}
+
+static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+{
+ size = __copy_from_user(x, ptr, size);
+ return size ? -EFAULT : 0;
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __pu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ __pu_err = __put_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ might_fault(); \
+ __put_user(x, ptr); \
+})
+
+
+int __put_user_bad(void) __attribute__((noreturn));
+
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __x; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ } \
+ case 2: { \
+ unsigned short __x; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ } \
+ case 4: { \
+ unsigned int __x; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ } \
+ case 8: { \
+ unsigned long long __x; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ } \
+ default: \
+ __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ might_fault(); \
+ __get_user(x, ptr); \
+})
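+
+/*
+ * Hypothetical usage sketch for the accessors above:
+ *
+ *	int __user *uptr = ...;
+ *	int val;
+ *
+ *	if (get_user(val, uptr))
+ *		return -EFAULT;
+ *	val++;
+ *	if (put_user(val, uptr))
+ *		return -EFAULT;
+ */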
+
+int __get_user_bad(void) __attribute__((noreturn));
+
+#define __put_user_unaligned __put_user
+#define __get_user_unaligned __get_user
+
+/**
+ * copy_to_user: - Copy a block of data into user space.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from kernel space to user space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ return __copy_to_user(to, from, n);
+}
+
+void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
+/**
+ * copy_from_user: - Copy a block of data from user space.
+ * @to: Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from user space to kernel space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned int sz = __compiletime_object_size(to);
+
+ might_fault();
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
+ }
+ return __copy_from_user(to, from, n);
+}
+
+unsigned long __must_check
+__copy_in_user(void __user *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ return __copy_in_user(to, from, n);
+}
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+
+long __strncpy_from_user(char *dst, const char __user *src, long count);
+
+static inline long __must_check
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ might_fault();
+ return __strncpy_from_user(dst, src, count);
+}
+
+unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
+
+static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
+{
+ might_fault();
+ return __strnlen_user(src, n);
+}
+
+/**
+ * strlen_user: - Get the size of a string in user space.
+ * @str: The string to measure.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+#define strlen_user(str) strnlen_user(str, ~0UL)
+
+/*
+ * Zero Userspace
+ */
+unsigned long __must_check __clear_user(void __user *to, unsigned long size);
+
+static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ return __clear_user(to, n);
+}
+
+int copy_to_user_real(void __user *dest, void *src, unsigned long count);
+void s390_kernel_write(void *dst, const void *src, size_t size);
+
+#endif /* __S390_UACCESS_H */
diff --git a/kernel/arch/s390/include/asm/unaligned.h b/kernel/arch/s390/include/asm/unaligned.h
new file mode 100644
index 000000000..da9627afe
--- /dev/null
+++ b/kernel/arch/s390/include/asm/unaligned.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_S390_UNALIGNED_H
+#define _ASM_S390_UNALIGNED_H
+
+/*
+ * The S390 can do unaligned accesses itself.
+ */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
+
+#endif /* _ASM_S390_UNALIGNED_H */
diff --git a/kernel/arch/s390/include/asm/unistd.h b/kernel/arch/s390/include/asm/unistd.h
new file mode 100644
index 000000000..91f56b1d8
--- /dev/null
+++ b/kernel/arch/s390/include/asm/unistd.h
@@ -0,0 +1,49 @@
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/unistd.h"
+ */
+#ifndef _ASM_S390_UNISTD_H_
+#define _ASM_S390_UNISTD_H_
+
+#include <uapi/asm/unistd.h>
+
+
+#define __IGNORE_time
+
+/* Ignore NUMA system calls. Not wired up on s390. */
+#define __IGNORE_mbind
+#define __IGNORE_get_mempolicy
+#define __IGNORE_set_mempolicy
+#define __IGNORE_migrate_pages
+#define __IGNORE_move_pages
+
+/* Ignore system calls that are also reachable via sys_socket */
+#define __IGNORE_recvmmsg
+#define __IGNORE_sendmmsg
+
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_IPC
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLD_MMAP
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+# ifdef CONFIG_COMPAT
+# define __ARCH_WANT_COMPAT_SYS_TIME
+# endif
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE
+
+#endif /* _ASM_S390_UNISTD_H_ */
diff --git a/kernel/arch/s390/include/asm/uprobes.h b/kernel/arch/s390/include/asm/uprobes.h
new file mode 100644
index 000000000..1411dff7f
--- /dev/null
+++ b/kernel/arch/s390/include/asm/uprobes.h
@@ -0,0 +1,42 @@
+/*
+ * User-space Probes (UProbes) for s390
+ *
+ * Copyright IBM Corp. 2014
+ * Author(s): Jan Willeke,
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <linux/notifier.h>
+
+typedef u16 uprobe_opcode_t;
+
+#define UPROBE_XOL_SLOT_BYTES 256 /* cache aligned */
+
+#define UPROBE_SWBP_INSN 0x0002
+#define UPROBE_SWBP_INSN_SIZE 2
+
+struct arch_uprobe {
+	union {
+ uprobe_opcode_t insn[3];
+ uprobe_opcode_t ixol[3];
+ };
+ unsigned int saved_per : 1;
+ unsigned int saved_int_code;
+};
+
+struct arch_uprobe_task {
+};
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
+ unsigned long addr);
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
+ void *data);
+void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
+unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
+ struct pt_regs *regs);
+#endif /* _ASM_UPROBES_H */
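
The generic uprobes core drives the hooks declared above around a breakpoint hit and an out-of-line (XOL) single-step. The 2-byte UPROBE_SWBP_INSN works because s390 instructions are 2, 4, or 6 bytes long and always start on a halfword boundary; the helper and flow comment below are an illustrative sketch, not code from this patch:

/* Any valid probe address must be halfword aligned: */
static bool example_uprobe_addr_ok(unsigned long addr)
{
	return (addr & 1) == 0;
}

/*
 * Rough hook sequence, driven by the generic uprobes core:
 *   arch_uprobe_analyze_insn() - copy and validate the original
 *                                instruction at registration time
 *   <task executes 0x0002>     - trap enters
 *                                arch_uprobe_exception_notify()
 *   arch_uprobe_pre_xol()      - redirect the PSW to the ixol copy
 *   <hardware single-step>
 *   arch_uprobe_post_xol()     - fix up the PSW and saved PER state
 *   arch_uprobe_abort_xol()    - back out if a signal interrupts
 */
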
diff --git a/kernel/arch/s390/include/asm/user.h b/kernel/arch/s390/include/asm/user.h
new file mode 100644
index 000000000..6ed1d1886
--- /dev/null
+++ b/kernel/arch/s390/include/asm/user.h
@@ -0,0 +1,74 @@
+/*
+ * S390 version
+ *
+ *  Derived from "include/asm-i386/user.h"
+ */
+
+#ifndef _S390_USER_H
+#define _S390_USER_H
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+/* Core file format: The core file is written in such a way that gdb
+ can understand it and provide useful information to the user (under
+   Linux we use the 'trad-core' bfd). There are quite a number of
+   obstacles to being able to view the contents of the floating-point
+   registers, and until these are solved you will not be able to view
+   them directly. You can, however, read in the core file and look at
+   the contents of the user struct to find out what the floating-point
+   registers contain.
+ The actual file contents are as follows:
+ UPAGE: 1 page consisting of a user struct that tells gdb what is present
+ in the file. Directly after this is a copy of the task_struct, which
+ is currently not used by gdb, but it may come in useful at some point.
+ All of the registers are stored as part of the upage. The upage should
+ always be only one page.
+ DATA: The data area is stored. We use current->end_text to
+ current->brk to pick up all of the user variables, plus any memory
+   that may have been malloced. No attempt is made to determine whether
+   a page is demand-zero or totally unused; we just cover the entire
+   range. All of the addresses are rounded in such a way that an integral
+ number of pages is written.
+ STACK: We need the stack information in order to get a meaningful
+   backtrace. We need to write the data from the stack pointer to
+   current->start_stack, so we round each of these off in order to be able
+   to write an integral number of pages.
+ The minimum core file size is 3 pages, or 12288 bytes.
+*/
+
+/*
+ * This is the old layout of "struct pt_regs", and is still the
+ * layout expected by user mode (ptrace and core dumps); the
+ * kernel's internal pt_regs has since diverged from it.
+ */
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ this will be used by gdb to figure out where the data and stack segments
+ are within the file, and what virtual addresses to use. */
+struct user {
+/* We start with the registers, to mimic the way that "memory" is returned
+   from a ptrace(PTRACE_PEEKUSR, ...) call. */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
+/* The rest of this junk is to help gdb figure out what goes where */
+ unsigned long int u_tsize; /* Text segment size (pages). */
+ unsigned long int u_dsize; /* Data segment size (pages). */
+ unsigned long int u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+					   This is actually the bottom of the stack;
+					   the top of the stack is always found in the
+					   stack pointer register. */
+ long int signal; /* Signal that caused the core dump. */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+};
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _S390_USER_H */
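
Sanity-checking the comment's arithmetic: UPAGES (1) plus one data page and one stack page makes 3 pages, i.e. 3 * 4096 = 12288 bytes with 4 KB pages. The HOST_* macros expand against a local variable conventionally named u; a hedged sketch of how a trad-core style dumper would use them (function and variable names invented for illustration):

#include <asm/user.h>

static void example_core_bounds(const struct user *up)
{
	struct user u = *up;	/* the HOST_* macros reference 'u' */

	unsigned long text_start = HOST_TEXT_START_ADDR;
	unsigned long stack_end  = HOST_STACK_END_ADDR;
	unsigned long min_core   = (UPAGES + 2) * NBPG;	/* 12288 */

	(void)text_start; (void)stack_end; (void)min_core;
}
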
diff --git a/kernel/arch/s390/include/asm/vdso.h b/kernel/arch/s390/include/asm/vdso.h
new file mode 100644
index 000000000..787acd4f9
--- /dev/null
+++ b/kernel/arch/s390/include/asm/vdso.h
@@ -0,0 +1,49 @@
+#ifndef __S390_VDSO_H__
+#define __S390_VDSO_H__
+
+/* Default link addresses for the vDSOs */
+#define VDSO32_LBASE 0
+#define VDSO64_LBASE 0
+
+#define VDSO_VERSION_STRING LINUX_2.6.29
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Note about the vdso_data and vdso_per_cpu_data structures:
+ *
+ * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of these
+ * structures is supposed to be known only to the functions in the
+ * vdso itself and may change without notice.
+ */
+
+struct vdso_data {
+ __u64 tb_update_count; /* Timebase atomicity ctr 0x00 */
+ __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
+ __u64 xtime_clock_sec; /* Kernel time 0x10 */
+ __u64 xtime_clock_nsec; /* 0x18 */
+ __u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */
+ __u64 xtime_coarse_nsec; /* 0x28 */
+ __u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */
+ __u64 wtom_clock_nsec; /* 0x38 */
+ __u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */
+ __u64 wtom_coarse_nsec; /* 0x48 */
+ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */
+ __u32 tz_dsttime; /* Type of dst correction 0x54 */
+ __u32 ectg_available; /* ECTG instruction present 0x58 */
+ __u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
+ __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
+};
+
+struct vdso_per_cpu_data {
+ __u64 ectg_timer_base;
+ __u64 ectg_user_time;
+};
+
+extern struct vdso_data *vdso_data;
+
+int vdso_alloc_per_cpu(struct _lowcore *lowcore);
+void vdso_free_per_cpu(struct _lowcore *lowcore);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __S390_VDSO_H__ */
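
tb_update_count is a seqlock-style counter: the updater makes it odd before touching the other fields and even again afterwards, so a reader retries until it observes the same even value on both sides of its reads. A simplified C rendering of the loop the vdso time functions perform (the real code is assembly and uses proper read barriers; this sketch is illustrative only):

static void example_read_xtime(const struct vdso_data *vd,
			       __u64 *sec, __u64 *nsec)
{
	__u64 seq;

	do {
		seq = vd->tb_update_count;	/* odd: update running */
		barrier();
		*sec  = vd->xtime_clock_sec;
		*nsec = vd->xtime_clock_nsec;
		barrier();
	} while ((seq & 1) || seq != vd->tb_update_count);
}
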
diff --git a/kernel/arch/s390/include/asm/vga.h b/kernel/arch/s390/include/asm/vga.h
new file mode 100644
index 000000000..d375526c2
--- /dev/null
+++ b/kernel/arch/s390/include/asm/vga.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_S390_VGA_H
+#define _ASM_S390_VGA_H
+
+/* Avoid compile errors due to missing asm/vga.h */
+
+#endif /* _ASM_S390_VGA_H */
diff --git a/kernel/arch/s390/include/asm/vtime.h b/kernel/arch/s390/include/asm/vtime.h
new file mode 100644
index 000000000..af9896c53
--- /dev/null
+++ b/kernel/arch/s390/include/asm/vtime.h
@@ -0,0 +1,7 @@
+#ifndef _S390_VTIME_H
+#define _S390_VTIME_H
+
+#define __ARCH_HAS_VTIME_ACCOUNT
+#define __ARCH_HAS_VTIME_TASK_SWITCH
+
+#endif /* _S390_VTIME_H */
diff --git a/kernel/arch/s390/include/asm/vtimer.h b/kernel/arch/s390/include/asm/vtimer.h
new file mode 100644
index 000000000..10a179af6
--- /dev/null
+++ b/kernel/arch/s390/include/asm/vtimer.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright IBM Corp. 2003, 2012
+ * Virtual CPU timer
+ *
+ * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_TIMER_H
+#define _ASM_S390_TIMER_H
+
+#define VTIMER_MAX_SLICE (0x7fffffffffffffffULL)
+
+struct vtimer_list {
+ struct list_head entry;
+ u64 expires;
+ u64 interval;
+ void (*function)(unsigned long);
+ unsigned long data;
+};
+
+extern void init_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer_periodic(struct vtimer_list *timer);
+extern int mod_virt_timer(struct vtimer_list *timer, u64 expires);
+extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires);
+extern int del_virt_timer(struct vtimer_list *timer);
+
+extern void init_cpu_vtimer(void);
+extern void vtime_init(void);
+
+#endif /* _ASM_S390_TIMER_H */
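
A sketch of arming a periodic virtual CPU timer with this API. Unlike an ordinary timer it fires after the CPU has consumed the given amount of virtual CPU time, not wall-clock time; the 10-second interval assumes CPU-timer units in TOD format, where bit 51 is one microsecond (so 1 us = 4096 units). The callback and names are illustrative:

static struct vtimer_list example_timer;

static void example_vtimer_fn(unsigned long data)
{
	/* Runs from the CPU-timer interrupt. */
}

static void example_start_vtimer(void)
{
	init_virt_timer(&example_timer);
	example_timer.function = example_vtimer_fn;
	example_timer.data     = 0;
	example_timer.expires  = 10ULL * 1000000 * 4096; /* ~10s CPU time */
	example_timer.interval = example_timer.expires;
	add_virt_timer_periodic(&example_timer);
}
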
diff --git a/kernel/arch/s390/include/asm/xor.h b/kernel/arch/s390/include/asm/xor.h
new file mode 100644
index 000000000..c82eb12a5
--- /dev/null
+++ b/kernel/arch/s390/include/asm/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>