Diffstat (limited to 'kernel/drivers/s390/cio')
-rw-r--r--  kernel/drivers/s390/cio/Makefile           16
-rw-r--r--  kernel/drivers/s390/cio/airq.c            275
-rw-r--r--  kernel/drivers/s390/cio/blacklist.c       423
-rw-r--r--  kernel/drivers/s390/cio/blacklist.h         6
-rw-r--r--  kernel/drivers/s390/cio/ccwgroup.c        641
-rw-r--r--  kernel/drivers/s390/cio/ccwreq.c          367
-rw-r--r--  kernel/drivers/s390/cio/chp.c             792
-rw-r--r--  kernel/drivers/s390/cio/chp.h              72
-rw-r--r--  kernel/drivers/s390/cio/chsc.c           1250
-rw-r--r--  kernel/drivers/s390/cio/chsc.h            238
-rw-r--r--  kernel/drivers/s390/cio/chsc_sch.c       1017
-rw-r--r--  kernel/drivers/s390/cio/chsc_sch.h         13
-rw-r--r--  kernel/drivers/s390/cio/cio.c            1020
-rw-r--r--  kernel/drivers/s390/cio/cio.h             137
-rw-r--r--  kernel/drivers/s390/cio/cio_debug.h        34
-rw-r--r--  kernel/drivers/s390/cio/cmf.c            1347
-rw-r--r--  kernel/drivers/s390/cio/crw.c             161
-rw-r--r--  kernel/drivers/s390/cio/css.c            1294
-rw-r--r--  kernel/drivers/s390/cio/css.h             146
-rw-r--r--  kernel/drivers/s390/cio/device.c         2161
-rw-r--r--  kernel/drivers/s390/cio/device.h          150
-rw-r--r--  kernel/drivers/s390/cio/device_fsm.c     1117
-rw-r--r--  kernel/drivers/s390/cio/device_id.c       222
-rw-r--r--  kernel/drivers/s390/cio/device_ops.c      799
-rw-r--r--  kernel/drivers/s390/cio/device_pgid.c     669
-rw-r--r--  kernel/drivers/s390/cio/device_status.c   397
-rw-r--r--  kernel/drivers/s390/cio/eadm_sch.c        418
-rw-r--r--  kernel/drivers/s390/cio/eadm_sch.h         22
-rw-r--r--  kernel/drivers/s390/cio/fcx.c             350
-rw-r--r--  kernel/drivers/s390/cio/idset.c           111
-rw-r--r--  kernel/drivers/s390/cio/idset.h            24
-rw-r--r--  kernel/drivers/s390/cio/io_sch.h          217
-rw-r--r--  kernel/drivers/s390/cio/ioasm.h           167
-rw-r--r--  kernel/drivers/s390/cio/isc.c              68
-rw-r--r--  kernel/drivers/s390/cio/itcw.c            369
-rw-r--r--  kernel/drivers/s390/cio/orb.h              91
-rw-r--r--  kernel/drivers/s390/cio/qdio.h            418
-rw-r--r--  kernel/drivers/s390/cio/qdio_debug.c      373
-rw-r--r--  kernel/drivers/s390/cio/qdio_debug.h       86
-rw-r--r--  kernel/drivers/s390/cio/qdio_main.c      1880
-rw-r--r--  kernel/drivers/s390/cio/qdio_setup.c      597
-rw-r--r--  kernel/drivers/s390/cio/qdio_thinint.c    299
-rw-r--r--  kernel/drivers/s390/cio/scm.c             288
43 files changed, 20542 insertions, 0 deletions
diff --git a/kernel/drivers/s390/cio/Makefile b/kernel/drivers/s390/cio/Makefile
new file mode 100644
index 000000000..8c4a386e9
--- /dev/null
+++ b/kernel/drivers/s390/cio/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the S/390 common i/o drivers
+#
+
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
+ fcx.o itcw.o crw.o ccwreq.o
+ccw_device-objs += device.o device_fsm.o device_ops.o
+ccw_device-objs += device_id.o device_pgid.o device_status.o
+obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
+obj-$(CONFIG_EADM_SCH) += eadm_sch.o
+obj-$(CONFIG_SCM_BUS) += scm.o
+obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
+obj-$(CONFIG_QDIO) += qdio.o
diff --git a/kernel/drivers/s390/cio/airq.c b/kernel/drivers/s390/cio/airq.c
new file mode 100644
index 000000000..56eb4ee4d
--- /dev/null
+++ b/kernel/drivers/s390/cio/airq.c
@@ -0,0 +1,275 @@
+/*
+ * Support for adapter interruptions
+ *
+ * Copyright IBM Corp. 1999, 2007
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Arnd Bergmann <arndb@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+
+static DEFINE_SPINLOCK(airq_lists_lock);
+static struct hlist_head airq_lists[MAX_ISC+1];
+
+/**
+ * register_adapter_interrupt() - register adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ *
+ * Returns 0 on success, -EINVAL on invalid input, or -ENOMEM if the
+ * summary-indicator allocation fails.
+ */
+int register_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
+
+ if (!airq->handler || airq->isc > MAX_ISC)
+ return -EINVAL;
+ if (!airq->lsi_ptr) {
+ airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
+ if (!airq->lsi_ptr)
+ return -ENOMEM;
+ airq->flags |= AIRQ_PTR_ALLOCATED;
+ }
+ if (!airq->lsi_mask)
+ airq->lsi_mask = 0xff;
+ snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ isc_register(airq->isc);
+ spin_lock(&airq_lists_lock);
+ hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
+ spin_unlock(&airq_lists_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_adapter_interrupt);
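A minimal usage sketch (hypothetical driver code; the handler, structure
name and ISC value are illustrative, not part of this patch). Leaving
lsi_ptr NULL lets register_adapter_interrupt() allocate the
local-summary-indicator byte itself, as seen above:

	static void my_airq_handler(struct airq_struct *airq)
	{
		/* dispatch work for the adapter that raised the interrupt */
	}

	static struct airq_struct my_airq = {
		.handler = my_airq_handler,
		.isc	 = 5,		/* illustrative ISC value */
	};

	/* driver init */
	rc = register_adapter_interrupt(&my_airq);
	if (rc)
		return rc;	/* -EINVAL or -ENOMEM */

	/* driver teardown */
	unregister_adapter_interrupt(&my_airq);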
+
+/**
+ * unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ */
+void unregister_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
+
+ if (hlist_unhashed(&airq->list))
+ return;
+ snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ spin_lock(&airq_lists_lock);
+ hlist_del_rcu(&airq->list);
+ spin_unlock(&airq_lists_lock);
+ synchronize_rcu();
+ isc_unregister(airq->isc);
+ if (airq->flags & AIRQ_PTR_ALLOCATED) {
+ kfree(airq->lsi_ptr);
+ airq->lsi_ptr = NULL;
+ airq->flags &= ~AIRQ_PTR_ALLOCATED;
+ }
+}
+EXPORT_SYMBOL(unregister_adapter_interrupt);
+
+static irqreturn_t do_airq_interrupt(int irq, void *dummy)
+{
+ struct tpi_info *tpi_info;
+ struct airq_struct *airq;
+ struct hlist_head *head;
+
+ set_cpu_flag(CIF_NOHZ_DELAY);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ head = &airq_lists[tpi_info->isc];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(airq, head, list)
+ if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+ airq->handler(airq);
+ rcu_read_unlock();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction airq_interrupt = {
+ .name = "AIO",
+ .handler = do_airq_interrupt,
+};
+
+void __init init_airq_interrupts(void)
+{
+ irq_set_chip_and_handler(THIN_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(THIN_INTERRUPT, &airq_interrupt);
+}
+
+/**
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+ * @flags: allocation flags
+ *
+ * Returns a pointer to an interrupt vector structure, or NULL if an
+ * allocation failed
+ */
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
+{
+ struct airq_iv *iv;
+ unsigned long size;
+
+ iv = kzalloc(sizeof(*iv), GFP_KERNEL);
+ if (!iv)
+ goto out;
+ iv->bits = bits;
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ iv->vector = kzalloc(size, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ if (flags & AIRQ_IV_ALLOC) {
+ iv->avail = kmalloc(size, GFP_KERNEL);
+ if (!iv->avail)
+ goto out_free;
+ memset(iv->avail, 0xff, size);
+ iv->end = 0;
+ } else
+ iv->end = bits;
+ if (flags & AIRQ_IV_BITLOCK) {
+ iv->bitlock = kzalloc(size, GFP_KERNEL);
+ if (!iv->bitlock)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_PTR) {
+ size = bits * sizeof(unsigned long);
+ iv->ptr = kzalloc(size, GFP_KERNEL);
+ if (!iv->ptr)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_DATA) {
+ size = bits * sizeof(unsigned int);
+ iv->data = kzalloc(size, GFP_KERNEL);
+ if (!iv->data)
+ goto out_free;
+ }
+ spin_lock_init(&iv->lock);
+ return iv;
+
+out_free:
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->avail);
+ kfree(iv->vector);
+ kfree(iv);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL(airq_iv_create);
+
+/**
+ * airq_iv_release - release an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ */
+void airq_iv_release(struct airq_iv *iv)
+{
+ kfree(iv->data);
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->vector);
+ kfree(iv->avail);
+ kfree(iv);
+}
+EXPORT_SYMBOL(airq_iv_release);
+
+/**
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
+ * @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
+ *
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified.
+ */
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
+{
+ unsigned long bit, i, flags;
+
+ if (!iv->avail || num == 0)
+ return -1UL;
+ spin_lock_irqsave(&iv->lock, flags);
+ bit = find_first_bit_inv(iv->avail, iv->bits);
+ while (bit + num <= iv->bits) {
+ for (i = 1; i < num; i++)
+ if (!test_bit_inv(bit + i, iv->avail))
+ break;
+ if (i >= num) {
+ /* Found a suitable block of irqs */
+ for (i = 0; i < num; i++)
+ clear_bit_inv(bit + i, iv->avail);
+ if (bit + num >= iv->end)
+ iv->end = bit + num + 1;
+ break;
+ }
+ bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+ }
+ if (bit + num > iv->bits)
+ bit = -1UL;
+ spin_unlock_irqrestore(&iv->lock, flags);
+ return bit;
+}
+EXPORT_SYMBOL(airq_iv_alloc);
+
+/**
+ * airq_iv_free - free irq bits of an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
+ */
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
+{
+ unsigned long i, flags;
+
+ if (!iv->avail || num == 0)
+ return;
+ spin_lock_irqsave(&iv->lock, flags);
+ for (i = 0; i < num; i++) {
+ /* Clear (possibly left over) interrupt bit */
+ clear_bit_inv(bit + i, iv->vector);
+ /* Make the bit positions available again */
+ set_bit_inv(bit + i, iv->avail);
+ }
+ if (bit + num >= iv->end) {
+ /* Find new end of bit-field */
+ while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+ iv->end--;
+ }
+ spin_unlock_irqrestore(&iv->lock, flags);
+}
+EXPORT_SYMBOL(airq_iv_free);
+
+/**
+ * airq_iv_scan - scan interrupt vector for non-zero bits
+ * @iv: pointer to interrupt vector structure
+ * @start: bit number to start the search
+ * @end: bit number to end the search
+ *
+ * Returns the bit number of the next non-zero interrupt bit, or
+ * -1UL if the scan completed without finding any more non-zero bits.
+ */
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+ unsigned long end)
+{
+ unsigned long bit;
+
+ /* Find the next non-zero bit, starting from @start. */
+ bit = find_next_bit_inv(iv->vector, end, start);
+ if (bit >= end)
+ return -1UL;
+ clear_bit_inv(bit, iv->vector);
+ return bit;
+}
+EXPORT_SYMBOL(airq_iv_scan);
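Taken together, the airq_iv interface is typically used as in the
following sketch (sizes and the per-bit handler are illustrative;
iv->end is read directly here for brevity):

	struct airq_iv *iv;
	unsigned long bit, n;

	/* vector with 256 interrupt bits plus an allocation bitmap */
	iv = airq_iv_create(256, AIRQ_IV_ALLOC);
	if (!iv)
		return -ENOMEM;

	bit = airq_iv_alloc(iv, 1);	/* reserve one irq bit */
	if (bit != -1UL) {
		/* ... point the adapter's indicator at 'bit', run I/O ... */
		airq_iv_free(iv, bit, 1);
	}
	airq_iv_release(iv);

On the interrupt side, a handler would drain all pending bits with
repeated scans:

	for (n = airq_iv_scan(iv, 0, iv->end); n != -1UL;
	     n = airq_iv_scan(iv, n + 1, iv->end))
		handle_bit(n);	/* hypothetical per-bit handler */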
diff --git a/kernel/drivers/s390/cio/blacklist.c b/kernel/drivers/s390/cio/blacklist.c
new file mode 100644
index 000000000..20314aad7
--- /dev/null
+++ b/kernel/drivers/s390/cio/blacklist.c
@@ -0,0 +1,423 @@
+/*
+ * S/390 common I/O routines -- blacklisting of specific devices
+ *
+ * Copyright IBM Corp. 1999, 2013
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+
+#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ipl.h>
+
+#include "blacklist.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+
+/*
+ * "Blacklisting" of certain devices:
+ * Device numbers given in the commandline as cio_ignore=... won't be known
+ * to Linux.
+ *
+ * These can be single devices or ranges of devices
+ */
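For example (syntax per the parser below; the device numbers are
illustrative), booting with

	cio_ignore=all,!0.0.b100-0.0.b1ff,!0.0.0009

blacklists every device and then exempts the range 0.0.b100-0.0.b1ff
and device 0.0.0009: a leading '!' inverts the action, and "all"
expands to the full cssid/ssid/devno range.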
+
+/* 65536 bits for each set to indicate if a devno is blacklisted or not */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+ (8*sizeof(long)))
+static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
+typedef enum {add, free} range_action;
+
+/*
+ * Function: blacklist_range
+ * (Un-)blacklist the devices from-to
+ */
+static int blacklist_range(range_action action, unsigned int from_ssid,
+ unsigned int to_ssid, unsigned int from,
+ unsigned int to, int msgtrigger)
+{
+ if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
+ if (msgtrigger)
+ pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
+ "range for cio_ignore\n", from_ssid, from,
+ to_ssid, to);
+
+ return 1;
+ }
+
+ while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
+ (from <= to))) {
+ if (action == add)
+ set_bit(from, bl_dev[from_ssid]);
+ else
+ clear_bit(from, bl_dev[from_ssid]);
+ from++;
+ if (from > __MAX_SUBCHANNEL) {
+ from_ssid++;
+ from = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int pure_hex(char **cp, unsigned int *val, int min_digit,
+ int max_digit, int max_val)
+{
+ int diff;
+
+ diff = 0;
+ *val = 0;
+
+ while (diff <= max_digit) {
+ int value = hex_to_bin(**cp);
+
+ if (value < 0)
+ break;
+ *val = *val * 16 + value;
+ (*cp)++;
+ diff++;
+ }
+
+ if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+ return 1;
+
+ return 0;
+}
+
+static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
+ unsigned int *devno, int msgtrigger)
+{
+ char *str_work;
+ int val, rc, ret;
+
+ rc = 1;
+
+ if (*str == '\0')
+ goto out;
+
+ /* old style */
+ str_work = str;
+ val = simple_strtoul(str, &str_work, 16);
+
+ if (*str_work == '\0') {
+ if (val <= __MAX_SUBCHANNEL) {
+ *devno = val;
+ *ssid = 0;
+ *cssid = 0;
+ rc = 0;
+ }
+ goto out;
+ }
+
+ /* new style */
+ str_work = str;
+ ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+ if (ret || (str_work[0] != '\0'))
+ goto out;
+
+ rc = 0;
+out:
+ if (rc && msgtrigger)
+ pr_warning("%s is not a valid device for the cio_ignore "
+ "kernel parameter\n", str);
+
+ return rc;
+}
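For example, parse_busid("0.0.4711", ...) stores cssid=0, ssid=0 and
devno=0x4711, while the old-style form "4711" resolves to the same
device with cssid and ssid defaulting to 0.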
+
+static int blacklist_parse_parameters(char *str, range_action action,
+ int msgtrigger)
+{
+ unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+ int rc, totalrc;
+ char *parm;
+ range_action ra;
+
+ totalrc = 0;
+
+ while ((parm = strsep(&str, ","))) {
+ rc = 0;
+ ra = action;
+ if (*parm == '!') {
+ if (ra == add)
+ ra = free;
+ else
+ ra = add;
+ parm++;
+ }
+ if (strcmp(parm, "all") == 0) {
+ from_cssid = 0;
+ from_ssid = 0;
+ from = 0;
+ to_cssid = __MAX_CSSID;
+ to_ssid = __MAX_SSID;
+ to = __MAX_SUBCHANNEL;
+ } else if (strcmp(parm, "ipldev") == 0) {
+ if (ipl_info.type == IPL_TYPE_CCW) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.ccw.dev_id.ssid;
+ from = ipl_info.data.ccw.dev_id.devno;
+ } else if (ipl_info.type == IPL_TYPE_FCP ||
+ ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.fcp.dev_id.ssid;
+ from = ipl_info.data.fcp.dev_id.devno;
+ } else {
+ continue;
+ }
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ } else if (strcmp(parm, "condev") == 0) {
+ if (console_devno == -1)
+ continue;
+
+ from_cssid = to_cssid = 0;
+ from_ssid = to_ssid = 0;
+ from = to = console_devno;
+ } else {
+ rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+ &from_ssid, &from, msgtrigger);
+ if (!rc) {
+ if (parm != NULL)
+ rc = parse_busid(parm, &to_cssid,
+ &to_ssid, &to,
+ msgtrigger);
+ else {
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ }
+ }
+ }
+ if (!rc) {
+ rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
+ msgtrigger);
+ if (rc)
+ totalrc = -EINVAL;
+ } else
+ totalrc = -EINVAL;
+ }
+
+ return totalrc;
+}
+
+static int __init
+blacklist_setup (char *str)
+{
+ CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
+ if (blacklist_parse_parameters(str, add, 1))
+ return 0;
+ return 1;
+}
+
+__setup ("cio_ignore=", blacklist_setup);
+
+/* Checking if devices are blacklisted */
+
+/*
+ * Function: is_blacklisted
+ * Returns 1 if the given device number can be found in the blacklist,
+ * otherwise 0.
+ * Used by validate_subchannel()
+ */
+int
+is_blacklisted (int ssid, int devno)
+{
+ return test_bit (devno, bl_dev[ssid]);
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Function: blacklist_parse_proc_parameters
+ * parse the stuff which is piped to /proc/cio_ignore
+ */
+static int blacklist_parse_proc_parameters(char *buf)
+{
+ int rc;
+ char *parm;
+
+ parm = strsep(&buf, " ");
+
+ if (strcmp("free", parm) == 0) {
+ rc = blacklist_parse_parameters(buf, free, 0);
+ css_schedule_eval_all_unreg(0);
+ } else if (strcmp("add", parm) == 0)
+ rc = blacklist_parse_parameters(buf, add, 0);
+ else if (strcmp("purge", parm) == 0)
+ return ccw_purge_blacklisted();
+ else
+ return -EINVAL;
+
+
+ return rc;
+}
+
+/* Iterator struct for all devices. */
+struct ccwdev_iter {
+ int devno;
+ int ssid;
+ int in_range;
+};
+
+static void *
+cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
+{
+ struct ccwdev_iter *iter = s->private;
+
+ if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ memset(iter, 0, sizeof(*iter));
+ iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
+ iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
+ return iter;
+}
+
+static void
+cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
+{
+}
+
+static void *
+cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+ struct ccwdev_iter *iter;
+
+ if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = it;
+ if (iter->devno == __MAX_SUBCHANNEL) {
+ iter->devno = 0;
+ iter->ssid++;
+ if (iter->ssid > __MAX_SSID)
+ return NULL;
+ } else
+ iter->devno++;
+ (*offset)++;
+ return iter;
+}
+
+static int
+cio_ignore_proc_seq_show(struct seq_file *s, void *it)
+{
+ struct ccwdev_iter *iter;
+
+ iter = it;
+ if (!is_blacklisted(iter->ssid, iter->devno))
+ /* Not blacklisted, nothing to output. */
+ return 0;
+ if (!iter->in_range) {
+ /* First device in range. */
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1)) {
+ /* Singular device. */
+ seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ return 0;
+ }
+ iter->in_range = 1;
+ seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+ return 0;
+ }
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1)) {
+ /* Last device in range. */
+ iter->in_range = 0;
+ seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ }
+ return 0;
+}
+
+static ssize_t
+cio_ignore_write(struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
+{
+ char *buf;
+ ssize_t rc, ret, i;
+
+ if (*offset)
+ return -EINVAL;
+ if (user_len > 65536)
+ user_len = 65536;
+ buf = vzalloc(user_len + 1); /* maybe better use the stack? */
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (strncpy_from_user (buf, user_buf, user_len) < 0) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+
+ i = user_len - 1;
+ while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
+ buf[i] = '\0';
+ i--;
+ }
+ ret = blacklist_parse_proc_parameters(buf);
+ if (ret)
+ rc = ret;
+ else
+ rc = user_len;
+
+out_free:
+ vfree (buf);
+ return rc;
+}
+
+static const struct seq_operations cio_ignore_proc_seq_ops = {
+ .start = cio_ignore_proc_seq_start,
+ .stop = cio_ignore_proc_seq_stop,
+ .next = cio_ignore_proc_seq_next,
+ .show = cio_ignore_proc_seq_show,
+};
+
+static int
+cio_ignore_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open_private(file, &cio_ignore_proc_seq_ops,
+ sizeof(struct ccwdev_iter));
+}
+
+static const struct file_operations cio_ignore_proc_fops = {
+ .open = cio_ignore_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+ .write = cio_ignore_write,
+};
+
+static int
+cio_ignore_proc_init (void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+ &cio_ignore_proc_fops);
+ if (!entry)
+ return -ENOENT;
+ return 0;
+}
+
+__initcall (cio_ignore_proc_init);
+
+#endif /* CONFIG_PROC_FS */
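At runtime the same parser is reachable through /proc/cio_ignore (a
usage sketch; the device numbers are illustrative):

	echo add 0.0.1234-0.0.1239 > /proc/cio_ignore
	echo free all > /proc/cio_ignore
	echo purge > /proc/cio_ignore

"add" and "free" are routed to blacklist_parse_parameters(), "purge"
calls ccw_purge_blacklisted(), and reading the file lists the currently
blacklisted ranges via the seq_file iterator above.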
diff --git a/kernel/drivers/s390/cio/blacklist.h b/kernel/drivers/s390/cio/blacklist.h
new file mode 100644
index 000000000..95e25c1df
--- /dev/null
+++ b/kernel/drivers/s390/cio/blacklist.h
@@ -0,0 +1,6 @@
+#ifndef S390_BLACKLIST_H
+#define S390_BLACKLIST_H
+
+extern int is_blacklisted (int ssid, int devno);
+
+#endif
diff --git a/kernel/drivers/s390/cio/ccwgroup.c b/kernel/drivers/s390/cio/ccwgroup.c
new file mode 100644
index 000000000..e443b0d0b
--- /dev/null
+++ b/kernel/drivers/s390/cio/ccwgroup.c
@@ -0,0 +1,641 @@
+/*
+ * bus driver for ccwgroup
+ *
+ * Copyright IBM Corp. 2002, 2012
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/dcache.h>
+
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include "device.h"
+
+#define CCW_BUS_ID_SIZE 10
+
+/* In Linux 2.4, we had a channel device layer called "chandev"
+ * that did all sorts of obscure stuff for networking devices.
+ * This is another driver that serves as a replacement for just
+ * one of its functions, namely the translation of single subchannels
+ * to devices that use multiple subchannels.
+ */
+
+static struct bus_type ccwgroup_bus_type;
+
+static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+{
+ int i;
+ char str[8];
+
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+ }
+}
+
+/*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+ struct ccw_device *cdev;
+ int i;
+
+ for (i = 0; i < gdev->count; i++) {
+ cdev = gdev->cdev[i];
+ if (!cdev)
+ continue;
+ spin_lock_irq(cdev->ccwlock);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irq(cdev->ccwlock);
+ gdev->cdev[i] = NULL;
+ put_device(&cdev->dev);
+ }
+}
+
+/**
+ * ccwgroup_set_online() - enable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the online state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_online(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_ONLINE)
+ goto out;
+ if (gdrv->set_online)
+ ret = gdrv->set_online(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_ONLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_online);
+
+/**
+ * ccwgroup_set_offline() - disable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the offline state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_OFFLINE)
+ goto out;
+ if (gdrv->set_offline)
+ ret = gdrv->set_offline(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_OFFLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_offline);
+
+static ssize_t ccwgroup_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ unsigned long value;
+ int ret;
+
+ device_lock(dev);
+ if (!dev->driver) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ goto out;
+
+ if (value == 1)
+ ret = ccwgroup_set_online(gdev);
+ else if (value == 0)
+ ret = ccwgroup_set_offline(gdev);
+ else
+ ret = -EINVAL;
+out:
+ device_unlock(dev);
+ return (ret == 0) ? count : ret;
+}
+
+static ssize_t ccwgroup_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int online;
+
+ online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+}
+
+/*
+ * Provide an 'ungroup' attribute so the user can remove group devices no
+ * longer needed or accidentally created. Saves memory :)
+ */
+static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
+{
+ mutex_lock(&gdev->reg_mutex);
+ if (device_is_registered(&gdev->dev)) {
+ __ccwgroup_remove_symlinks(gdev);
+ device_unregister(&gdev->dev);
+ __ccwgroup_remove_cdev_refs(gdev);
+ }
+ mutex_unlock(&gdev->reg_mutex);
+}
+
+static ssize_t ccwgroup_ungroup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int rc = 0;
+
+ /* Prevent concurrent online/offline processing and ungrouping. */
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state != CCWGROUP_OFFLINE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (device_remove_file_self(dev, attr))
+ ccwgroup_ungroup(gdev);
+ else
+ rc = -ENODEV;
+out:
+ if (rc) {
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
+ return rc;
+ }
+ return count;
+}
+static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static struct attribute *ccwgroup_attrs[] = {
+ &dev_attr_online.attr,
+ &dev_attr_ungroup.attr,
+ NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+ .attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+ &ccwgroup_attr_group,
+ NULL,
+};
+
+static void ccwgroup_ungroup_workfn(struct work_struct *work)
+{
+ struct ccwgroup_device *gdev =
+ container_of(work, struct ccwgroup_device, ungroup_work);
+
+ ccwgroup_ungroup(gdev);
+ put_device(&gdev->dev);
+}
+
+static void ccwgroup_release(struct device *dev)
+{
+ kfree(to_ccwgroupdev(dev));
+}
+
+static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+{
+ char str[8];
+ int i, rc;
+
+ for (i = 0; i < gdev->count; i++) {
+ rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
+ &gdev->dev.kobj, "group_device");
+ if (rc) {
+ for (--i; i >= 0; i--)
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+ "group_device");
+ return rc;
+ }
+ }
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ rc = sysfs_create_link(&gdev->dev.kobj,
+ &gdev->cdev[i]->dev.kobj, str);
+ if (rc) {
+ for (--i; i >= 0; i--) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ }
+ for (i = 0; i < gdev->count; i++)
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+ "group_device");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int __get_next_id(const char **buf, struct ccw_dev_id *id)
+{
+ unsigned int cssid, ssid, devno;
+ int ret = 0, len;
+ char *start, *end;
+
+ start = (char *)*buf;
+ end = strchr(start, ',');
+ if (!end) {
+ /* Last entry. Strip trailing newline, if applicable. */
+ end = strchr(start, '\n');
+ if (end)
+ *end = '\0';
+ len = strlen(start) + 1;
+ } else {
+ len = end - start + 1;
+ end++;
+ }
+ if (len <= CCW_BUS_ID_SIZE) {
+ if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+
+ if (!ret) {
+ id->ssid = ssid;
+ id->devno = devno;
+ }
+ *buf = end;
+ return ret;
+}
+
+/**
+ * ccwgroup_create_dev() - create and register a ccw group device
+ * @parent: parent device for the new device
+ * @gdrv: driver for the new group device
+ * @num_devices: number of slave devices
+ * @buf: buffer containing comma separated bus ids of slave devices
+ *
+ * Create and register a new ccw group device as a child of @parent. Slave
+ * devices are obtained from the list of bus ids given in @buf.
+ * Returns:
+ * %0 on success and an error code on failure.
+ * Context:
+ * non-atomic
+ */
+int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
+ int num_devices, const char *buf)
+{
+ struct ccwgroup_device *gdev;
+ struct ccw_dev_id dev_id;
+ int rc, i;
+
+ gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
+ GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ atomic_set(&gdev->onoff, 0);
+ mutex_init(&gdev->reg_mutex);
+ mutex_lock(&gdev->reg_mutex);
+ INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
+ gdev->count = num_devices;
+ gdev->dev.bus = &ccwgroup_bus_type;
+ gdev->dev.parent = parent;
+ gdev->dev.release = ccwgroup_release;
+ device_initialize(&gdev->dev);
+
+ for (i = 0; i < num_devices && buf; i++) {
+ rc = __get_next_id(&buf, &dev_id);
+ if (rc != 0)
+ goto error;
+ gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
+ /*
+ * All devices have to be of the same type in
+ * order to be grouped.
+ */
+ if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
+ gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
+ gdev->cdev[i]->id.driver_info !=
+ gdev->cdev[0]->id.driver_info) {
+ rc = -EINVAL;
+ goto error;
+ }
+ /* Don't allow a device to belong to more than one group. */
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
+ if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
+ rc = -EINVAL;
+ goto error;
+ }
+ dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
+ }
+ /* Check for sufficient number of bus ids. */
+ if (i < num_devices) {
+ rc = -EINVAL;
+ goto error;
+ }
+ /* Check for trailing stuff. */
+ if (i == num_devices && strlen(buf) > 0) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
+ gdev->dev.groups = ccwgroup_attr_groups;
+
+ if (gdrv) {
+ gdev->dev.driver = &gdrv->driver;
+ rc = gdrv->setup ? gdrv->setup(gdev) : 0;
+ if (rc)
+ goto error;
+ }
+ rc = device_add(&gdev->dev);
+ if (rc)
+ goto error;
+ rc = __ccwgroup_create_symlinks(gdev);
+ if (rc) {
+ device_del(&gdev->dev);
+ goto error;
+ }
+ mutex_unlock(&gdev->reg_mutex);
+ return 0;
+error:
+ for (i = 0; i < num_devices; i++)
+ if (gdev->cdev[i]) {
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
+ if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
+ dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
+ put_device(&gdev->cdev[i]->dev);
+ gdev->cdev[i] = NULL;
+ }
+ mutex_unlock(&gdev->reg_mutex);
+ put_device(&gdev->dev);
+ return rc;
+}
+EXPORT_SYMBOL(ccwgroup_create_dev);
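A hedged sketch of a caller, e.g. a group driver's own sysfs "group"
attribute (my_root_dev and my_ccwgroup_driver are hypothetical; the bus
ids merely illustrate the "%x.%x.%04x" format parsed by
__get_next_id()):

	/* userspace wrote "0.0.0600,0.0.0601,0.0.0602\n" to the attribute */
	static ssize_t group_store(struct device_driver *drv,
				   const char *buf, size_t count)
	{
		int rc;

		rc = ccwgroup_create_dev(my_root_dev, &my_ccwgroup_driver,
					 3, buf);
		return rc ? rc : count;
	}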
+
+static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(data);
+
+ if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+ get_device(&gdev->dev);
+ schedule_work(&gdev->ungroup_work);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ccwgroup_nb = {
+ .notifier_call = ccwgroup_notifier
+};
+
+static int __init init_ccwgroup(void)
+{
+ int ret;
+
+ ret = bus_register(&ccwgroup_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+ if (ret)
+ bus_unregister(&ccwgroup_bus_type);
+
+ return ret;
+}
+
+static void __exit cleanup_ccwgroup(void)
+{
+ bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+ bus_unregister(&ccwgroup_bus_type);
+}
+
+module_init(init_ccwgroup);
+module_exit(cleanup_ccwgroup);
+
+/************************** driver stuff ******************************/
+
+static int ccwgroup_remove(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+ if (!dev->driver)
+ return 0;
+ if (gdrv->remove)
+ gdrv->remove(gdev);
+
+ return 0;
+}
+
+static void ccwgroup_shutdown(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+ if (!dev->driver)
+ return;
+ if (gdrv->shutdown)
+ gdrv->shutdown(gdev);
+}
+
+static int ccwgroup_pm_prepare(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ /* Fail while device is being set online/offline. */
+ if (atomic_read(&gdev->onoff))
+ return -EAGAIN;
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->prepare ? gdrv->prepare(gdev) : 0;
+}
+
+static void ccwgroup_pm_complete(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return;
+
+ if (gdrv->complete)
+ gdrv->complete(gdev);
+}
+
+static int ccwgroup_pm_freeze(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->freeze ? gdrv->freeze(gdev) : 0;
+}
+
+static int ccwgroup_pm_thaw(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->thaw ? gdrv->thaw(gdev) : 0;
+}
+
+static int ccwgroup_pm_restore(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+ if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+ return 0;
+
+ return gdrv->restore ? gdrv->restore(gdev) : 0;
+}
+
+static const struct dev_pm_ops ccwgroup_pm_ops = {
+ .prepare = ccwgroup_pm_prepare,
+ .complete = ccwgroup_pm_complete,
+ .freeze = ccwgroup_pm_freeze,
+ .thaw = ccwgroup_pm_thaw,
+ .restore = ccwgroup_pm_restore,
+};
+
+static struct bus_type ccwgroup_bus_type = {
+ .name = "ccwgroup",
+ .remove = ccwgroup_remove,
+ .shutdown = ccwgroup_shutdown,
+ .pm = &ccwgroup_pm_ops,
+};
+
+/**
+ * ccwgroup_driver_register() - register a ccw group driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ */
+int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
+{
+ /* register our new driver with the core */
+ cdriver->driver.bus = &ccwgroup_bus_type;
+
+ return driver_register(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_register);
+
+static int __ccwgroup_match_all(struct device *dev, void *data)
+{
+ return 1;
+}
+
+/**
+ * ccwgroup_driver_unregister() - deregister a ccw group driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
+{
+ struct device *dev;
+
+ /* We don't want ccwgroup devices to live longer than their driver. */
+ while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
+ __ccwgroup_match_all))) {
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+
+ ccwgroup_ungroup(gdev);
+ put_device(dev);
+ }
+ driver_unregister(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
+
+/**
+ * ccwgroup_probe_ccwdev() - probe function for slave devices
+ * @cdev: ccw device to be probed
+ *
+ * This is a dummy probe function for ccw devices that are slave devices in
+ * a ccw group device.
+ * Returns:
+ * always %0
+ */
+int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
+
+/**
+ * ccwgroup_remove_ccwdev() - remove function for slave devices
+ * @cdev: ccw device to be removed
+ *
+ * This is a remove function for ccw devices that are slave devices in a ccw
+ * group device. It sets the ccw device offline and also deregisters the
+ * embedding ccw group device.
+ */
+void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
+{
+ struct ccwgroup_device *gdev;
+
+ /* Ignore offlining errors, device is gone anyway. */
+ ccw_device_set_offline(cdev);
+ /* If one of its devices is gone, the whole group is done for. */
+ spin_lock_irq(cdev->ccwlock);
+ gdev = dev_get_drvdata(&cdev->dev);
+ if (!gdev) {
+ spin_unlock_irq(cdev->ccwlock);
+ return;
+ }
+ /* Get ccwgroup device reference for local processing. */
+ get_device(&gdev->dev);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Unregister group device. */
+ ccwgroup_ungroup(gdev);
+ /* Release ccwgroup device reference for local processing. */
+ put_device(&gdev->dev);
+}
+EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/s390/cio/ccwreq.c b/kernel/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000..79f59915f
--- /dev/null
+++ b/kernel/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,367 @@
+/*
+ * Handling of internal CCW device requests.
+ *
+ * Copyright IBM Corp. 2009, 2011
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
+ */
+int lpm_adjust(int lpm, int mask)
+{
+ while (lpm && ((lpm & mask) == 0))
+ lpm >>= 1;
+ return lpm;
+}
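For example, lpm_adjust(0x40, 0x0f) shifts 0x40 -> 0x20 -> 0x10 -> 0x08
and returns 0x08, the highest-order path that is also set in @mask,
while lpm_adjust(0x40, 0) shifts all the way down and returns 0.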
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (!req->singlepath) {
+ req->mask = 0;
+ goto out;
+ }
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask >> 1, req->lpm);
+out:
+ return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->done)
+ return;
+ req->done = 1;
+ ccw_device_set_timeout(cdev, 0);
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ if (rc && rc != -ENODEV && req->drc)
+ rc = req->drc;
+ req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw1 *cp = req->cp;
+ int rc = -EACCES;
+
+ while (req->mask) {
+ if (req->retries-- == 0) {
+ /* Retries exhausted, try next path. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Perform start function. */
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ rc = cio_start(sch, cp, (u8) req->mask);
+ if (rc == 0) {
+ /* I/O started successfully. */
+ ccw_device_set_timeout(cdev, req->timeout);
+ return;
+ }
+ if (rc == -ENODEV) {
+ /* Permanent device error. */
+ break;
+ }
+ if (rc == -EACCES) {
+ /* Permanent path error. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Temporary improper status. */
+ rc = cio_clear(sch);
+ if (rc)
+ break;
+ return;
+ }
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->singlepath) {
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ } else
+ req->mask = req->lpm;
+
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask, req->lpm);
+ req->drc = 0;
+ req->done = 0;
+ req->cancel = 0;
+ if (!req->mask)
+ goto out_nopath;
+ ccwreq_do(cdev);
+ return;
+
+out_nopath:
+ ccwreq_stop(cdev, -EACCES);
+}
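A hedged sketch of an initiator, modeled on how internal users fill in
cdev->private->req before starting it (all values are illustrative and
my_done_callback is hypothetical; the callback is invoked from
ccwreq_stop() with the final return code):

	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp		= cp;		/* channel program to start */
	req->timeout	= 5 * HZ;	/* per-attempt timeout */
	req->maxretries	= 2;		/* retries per path */
	req->lpm	= sch->schib.pmcw.pam & sch->opm;
	req->singlepath	= 1;		/* walk paths one at a time */
	req->callback	= my_done_callback;
	ccw_request_start(cdev);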
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->req. Return non-zero if request
+ * has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc;
+
+ if (req->done)
+ return 1;
+ req->cancel = 1;
+ rc = cio_clear(sch);
+ if (rc)
+ ccwreq_stop(cdev, rc);
+ return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+ struct irb *irb = &cdev->private->irb;
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ enum uc_todo todo;
+
+ /* Perform BASIC SENSE if needed. */
+ if (ccw_device_accumulate_and_sense(cdev, lcirb))
+ return IO_RUNNING;
+ /* Check for halt/clear interrupt. */
+ if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ return IO_KILLED;
+ /* Check for path error. */
+ if (scsw->cc == 3 || scsw->pno)
+ return IO_PATH_ERROR;
+ /* Handle BASIC SENSE data. */
+ if (irb->esw.esw0.erw.cons) {
+ CIO_TRACE_EVENT(2, "sensedata");
+ CIO_HEX_EVENT(2, &cdev->private->dev_id,
+ sizeof(struct ccw_dev_id));
+ CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ /* Check for command reject. */
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ return IO_REJECTED;
+ /* Ask the driver what to do */
+ if (cdev->drv && cdev->drv->uc_handler) {
+ todo = cdev->drv->uc_handler(cdev, lcirb);
+ CIO_TRACE_EVENT(2, "uc_response");
+ CIO_HEX_EVENT(2, &todo, sizeof(todo));
+ switch (todo) {
+ case UC_TODO_RETRY:
+ return IO_STATUS_ERROR;
+ case UC_TODO_RETRY_ON_NEW_PATH:
+ return IO_PATH_ERROR;
+ case UC_TODO_STOP:
+ return IO_REJECTED;
+ default:
+ return IO_STATUS_ERROR;
+ }
+ }
+ /* Assume that unexpected SENSE data implies an error. */
+ return IO_STATUS_ERROR;
+ }
+ /* Check for channel errors. */
+ if (scsw->cstat != 0)
+ return IO_STATUS_ERROR;
+ /* Check for device errors. */
+ if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return IO_STATUS_ERROR;
+ /* Check for final state. */
+ if (!(scsw->dstat & DEV_STAT_DEV_END))
+ return IO_RUNNING;
+ /* Check for other improper status. */
+ if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+ return IO_STATUS_ERROR;
+ return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct {
+ struct ccw_dev_id dev_id;
+ u16 retries;
+ u8 lpm;
+ u8 status;
+ } __attribute__ ((packed)) data;
+ data.dev_id = cdev->private->dev_id;
+ data.retries = req->retries;
+ data.lpm = (u8) req->mask;
+ data.status = (u8) status;
+ CIO_TRACE_EVENT(2, "reqstat");
+ CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+ struct irb *irb = this_cpu_ptr(&cio_irb);
+ struct ccw_request *req = &cdev->private->req;
+ enum io_status status;
+ int rc = -EOPNOTSUPP;
+
+ /* Check status of I/O request. */
+ status = ccwreq_status(cdev, irb);
+ if (req->filter)
+ status = req->filter(cdev, req->data, irb, status);
+ if (status != IO_RUNNING)
+ ccw_device_set_timeout(cdev, 0);
+ if (status != IO_DONE && status != IO_RUNNING)
+ ccwreq_log_status(cdev, status);
+ switch (status) {
+ case IO_DONE:
+ break;
+ case IO_RUNNING:
+ return;
+ case IO_REJECTED:
+ goto err;
+ case IO_PATH_ERROR:
+ goto out_next_path;
+ case IO_STATUS_ERROR:
+ goto out_restart;
+ case IO_KILLED:
+ /* Check if request was cancelled on purpose. */
+ if (req->cancel) {
+ rc = -EIO;
+ goto err;
+ }
+ goto out_restart;
+ }
+ /* Check back with request initiator. */
+ if (!req->check)
+ goto out;
+ switch (req->check(cdev, req->data)) {
+ case 0:
+ break;
+ case -EAGAIN:
+ goto out_restart;
+ case -EACCES:
+ goto out_next_path;
+ default:
+ goto err;
+ }
+out:
+ ccwreq_stop(cdev, 0);
+ return;
+
+out_next_path:
+ /* Try next path and restart I/O. */
+ if (!ccwreq_next_path(cdev)) {
+ rc = -EACCES;
+ goto err;
+ }
+out_restart:
+ /* Restart. */
+ ccwreq_do(cdev);
+ return;
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc = -ENODEV, chp;
+
+ if (cio_update_schib(sch))
+ goto err;
+
+ for (chp = 0; chp < 8; chp++) {
+ if ((0x80 >> chp) & sch->schib.pmcw.lpum)
+ pr_warning("%s: No interrupt was received within %lus "
+ "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+ dev_name(&cdev->dev), req->timeout / HZ,
+ scsw_cstat(&sch->schib.scsw),
+ scsw_dstat(&sch->schib.scsw),
+ sch->schid.cssid,
+ sch->schib.pmcw.chpid[chp]);
+ }
+
+ if (!ccwreq_next_path(cdev)) {
+ /* set the final return code for this request */
+ req->drc = -ETIME;
+ }
+ rc = cio_clear(sch);
+ if (rc)
+ goto err;
+ return;
+
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle notoper during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+ ccwreq_stop(cdev, -ENODEV);
+}
diff --git a/kernel/drivers/s390/cio/chp.c b/kernel/drivers/s390/cio/chp.c
new file mode 100644
index 000000000..c692dfebd
--- /dev/null
+++ b/kernel/drivers/s390/cio/chp.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright IBM Corp. 1999, 2010
+ * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <asm/chpid.h>
+#include <asm/sclp.h>
+#include <asm/crw.h>
+
+#include "cio.h"
+#include "css.h"
+#include "ioasm.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+#define to_channelpath(device) container_of(device, struct channel_path, dev)
+#define CHP_INFO_UPDATE_INTERVAL 1*HZ
+
+enum cfg_task_t {
+ cfg_none,
+ cfg_configure,
+ cfg_deconfigure
+};
+
+/* Map for pending configure tasks. */
+static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
+static DEFINE_MUTEX(cfg_lock);
+static int cfg_busy;
+
+/* Map for channel-path status. */
+static struct sclp_chp_info chp_info;
+static DEFINE_MUTEX(info_lock);
+
+/* Time after which channel-path status may be outdated. */
+static unsigned long chp_info_expires;
+
+/* Workqueue to perform pending configure tasks. */
+static struct workqueue_struct *chp_wq;
+static struct work_struct cfg_work;
+
+/* Wait queue for configure completion events. */
+static wait_queue_head_t cfg_wait_queue;
+
+/* Set vary state for given chpid. */
+static void set_chp_logically_online(struct chp_id chpid, int onoff)
+{
+ chpid_to_chp(chpid)->state = onoff;
+}
+
+/* On success return 0 if channel-path is varied offline, 1 if it is varied
+ * online. Return -ENODEV if channel-path is not registered. */
+int chp_get_status(struct chp_id chpid)
+{
+ return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
+}
+
+/**
+ * chp_get_sch_opm - return opm for subchannel
+ * @sch: subchannel
+ *
+ * Calculate and return the operational path mask (opm) based on the chpids
+ * used by the subchannel and the status of the associated channel-paths.
+ */
+u8 chp_get_sch_opm(struct subchannel *sch)
+{
+ struct chp_id chpid;
+ int opm;
+ int i;
+
+ opm = 0;
+ chp_id_init(&chpid);
+ for (i = 0; i < 8; i++) {
+ opm <<= 1;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ if (chp_get_status(chpid) != 0)
+ opm |= 1;
+ }
+ return opm;
+}
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
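For example, if only the channel paths in pmcw.chpid[0] and
pmcw.chpid[7] are varied online, the loop accumulates opm = 0x81: the
first chpid ends up in bit 7 (0x80) after the remaining shifts, the
last in bit 0 (0x01).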
+
+/**
+ * chp_is_registered - check if a channel-path is registered
+ * @chpid: channel-path ID
+ *
+ * Return non-zero if a channel-path with the given chpid is registered,
+ * zero otherwise.
+ */
+int chp_is_registered(struct chp_id chpid)
+{
+ return chpid_to_chp(chpid) != NULL;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int s390_vary_chpid(struct chp_id chpid, int on)
+{
+ char dbf_text[15];
+ int status;
+
+ sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
+ chpid.id);
+ CIO_TRACE_EVENT(2, dbf_text);
+
+ status = chp_get_status(chpid);
+ if (!on && !status)
+ return 0;
+
+ set_chp_logically_online(chpid, on);
+ chsc_chp_vary(chpid, on);
+ return 0;
+}
+
+/*
+ * Channel measurement related functions
+ */
+static ssize_t chp_measurement_chars_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct channel_path *chp;
+ struct device *device;
+
+ device = container_of(kobj, struct device, kobj);
+ chp = to_channelpath(device);
+ if (!chp->cmg_chars)
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &off,
+ chp->cmg_chars, sizeof(struct cmg_chars));
+}
+
+static struct bin_attribute chp_measurement_chars_attr = {
+ .attr = {
+ .name = "measurement_chars",
+ .mode = S_IRUSR,
+ },
+ .size = sizeof(struct cmg_chars),
+ .read = chp_measurement_chars_read,
+};
+
+static void chp_measurement_copy_block(struct cmg_entry *buf,
+ struct channel_subsystem *css,
+ struct chp_id chpid)
+{
+ void *area;
+ struct cmg_entry *entry, reference_buf;
+ int idx;
+
+ if (chpid.id < 128) {
+ area = css->cub_addr1;
+ idx = chpid.id;
+ } else {
+ area = css->cub_addr2;
+ idx = chpid.id - 128;
+ }
+ entry = area + (idx * sizeof(struct cmg_entry));
+ do {
+ memcpy(buf, entry, sizeof(*entry));
+ memcpy(&reference_buf, entry, sizeof(*entry));
+ } while (reference_buf.values[0] != buf->values[0]);
+}
+
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct channel_path *chp;
+ struct channel_subsystem *css;
+ struct device *device;
+ unsigned int size;
+
+ device = container_of(kobj, struct device, kobj);
+ chp = to_channelpath(device);
+ css = to_css(chp->dev.parent);
+
+ size = sizeof(struct cmg_entry);
+
+ /* Only allow single reads. */
+ if (off || count < size)
+ return 0;
+ chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
+ count = size;
+ return count;
+}
+
+static struct bin_attribute chp_measurement_attr = {
+ .attr = {
+ .name = "measurement",
+ .mode = S_IRUSR,
+ },
+ .size = sizeof(struct cmg_entry),
+ .read = chp_measurement_read,
+};
+
+void chp_remove_cmg_attr(struct channel_path *chp)
+{
+ device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ device_remove_bin_file(&chp->dev, &chp_measurement_attr);
+}
+
+int chp_add_cmg_attr(struct channel_path *chp)
+{
+ int ret;
+
+ ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ if (ret)
+ return ret;
+ ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
+ if (ret)
+ device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ return ret;
+}
+
+/*
+ * Files for the channel path entries.
+ */
+static ssize_t chp_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ int status;
+
+ mutex_lock(&chp->lock);
+ status = chp->state;
+ mutex_unlock(&chp->lock);
+
+ return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
+}
+
+static ssize_t chp_status_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_path *cp = to_channelpath(dev);
+ char cmd[10];
+ int num_args;
+ int error;
+
+ num_args = sscanf(buf, "%5s", cmd);
+ if (!num_args)
+ return count;
+
+ if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+ mutex_lock(&cp->lock);
+ error = s390_vary_chpid(cp->chpid, 1);
+ mutex_unlock(&cp->lock);
+ } else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+ mutex_lock(&cp->lock);
+ error = s390_vary_chpid(cp->chpid, 0);
+ mutex_unlock(&cp->lock);
+ } else
+ error = -EINVAL;
+
+ return error < 0 ? error : count;
+}
+
+static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
+
+static ssize_t chp_configure_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *cp;
+ int status;
+
+ cp = to_channelpath(dev);
+ status = chp_info_get_status(cp->chpid);
+ if (status < 0)
+ return status;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", status);
+}
+
+static int cfg_wait_idle(void);
+
+static ssize_t chp_configure_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_path *cp;
+ int val;
+ char delim;
+
+ if (sscanf(buf, "%d %c", &val, &delim) != 1)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ cp = to_channelpath(dev);
+ chp_cfg_schedule(cp->chpid, val);
+ cfg_wait_idle();
+
+ return count;
+}
+
+static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
+
+static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ u8 type;
+
+ mutex_lock(&chp->lock);
+ type = chp->desc.desc;
+ mutex_unlock(&chp->lock);
+ return sprintf(buf, "%x\n", type);
+}
+
+static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
+
+static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+
+ if (!chp)
+ return 0;
+ if (chp->cmg == -1) /* channel measurements not available */
+ return sprintf(buf, "unknown\n");
+ return sprintf(buf, "%x\n", chp->cmg);
+}
+
+static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
+
+static ssize_t chp_shared_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+
+ if (!chp)
+ return 0;
+ if (chp->shared == -1) /* channel measurements not available */
+ return sprintf(buf, "unknown\n");
+ return sprintf(buf, "%x\n", chp->shared);
+}
+
+static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
+static struct attribute *chp_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_configure.attr,
+ &dev_attr_type.attr,
+ &dev_attr_cmg.attr,
+ &dev_attr_shared.attr,
+ &dev_attr_chid.attr,
+ &dev_attr_chid_external.attr,
+ NULL,
+};
+static struct attribute_group chp_attr_group = {
+ .attrs = chp_attrs,
+};
+static const struct attribute_group *chp_attr_groups[] = {
+ &chp_attr_group,
+ NULL,
+};
+
+static void chp_release(struct device *dev)
+{
+ struct channel_path *cp;
+
+ cp = to_channelpath(dev);
+ kfree(cp);
+}
+
+/**
+ * chp_update_desc - update channel-path description
+ * @chp: channel-path
+ *
+ * Update the channel-path description of the specified channel-path.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_update_desc(struct channel_path *chp)
+{
+ int rc;
+
+ rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
+ if (rc)
+ return rc;
+
+ rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+
+ return rc;
+}
+
+/**
+ * chp_new - register a new channel-path
+ * @chpid: channel-path ID
+ *
+ * Create and register data structure representing new channel-path. Return
+ * zero on success, non-zero otherwise.
+ */
+int chp_new(struct chp_id chpid)
+{
+ struct channel_path *chp;
+ int ret;
+
+ if (chp_is_registered(chpid))
+ return 0;
+ chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+ if (!chp)
+ return -ENOMEM;
+
+ /* fill in status, etc. */
+ chp->chpid = chpid;
+ chp->state = 1;
+ chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
+ chp->dev.groups = chp_attr_groups;
+ chp->dev.release = chp_release;
+ mutex_init(&chp->lock);
+
+ /* Obtain channel path description and fill it in. */
+ ret = chp_update_desc(chp);
+ if (ret)
+ goto out_free;
+ if ((chp->desc.flags & 0x80) == 0) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+ /* Get channel-measurement characteristics. */
+ if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
+ ret = chsc_get_channel_measurement_chars(chp);
+ if (ret)
+ goto out_free;
+ } else {
+ chp->cmg = -1;
+ }
+ dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
+
+ /* make it known to the system */
+ ret = device_register(&chp->dev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
+ chpid.cssid, chpid.id, ret);
+ put_device(&chp->dev);
+ goto out;
+ }
+ mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
+ if (channel_subsystems[chpid.cssid]->cm_enabled) {
+ ret = chp_add_cmg_attr(chp);
+ if (ret) {
+ device_unregister(&chp->dev);
+ mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
+ goto out;
+ }
+ }
+ channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
+ mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
+ goto out;
+out_free:
+ kfree(chp);
+out:
+ return ret;
+}
+
+/**
+ * chp_get_chp_desc - return newly allocated channel-path description
+ * @chpid: channel-path ID
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel-path ID. Return %NULL on error.
+ */
+struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid)
+{
+ struct channel_path *chp;
+ struct channel_path_desc *desc;
+
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ return NULL;
+ desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ mutex_lock(&chp->lock);
+ memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+ mutex_unlock(&chp->lock);
+ return desc;
+}
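+/*
+ * Minimal usage sketch (hypothetical caller, for illustration): the
+ * descriptor returned by chp_get_chp_desc() is a kmalloc'ed copy, so the
+ * caller owns it and must kfree() it when done:
+ *
+ *    desc = chp_get_chp_desc(chpid);
+ *    if (!desc)
+ *        return -ENODEV;
+ *    (inspect desc->flags etc. here)
+ *    kfree(desc);
+ */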
+
+/**
+ * chp_process_crw - process channel-path status change
+ * @crw0: channel report-word to handle
+ * @crw1: second channel report-word (always NULL)
+ * @overflow: crw overflow indication
+ *
+ * Handle channel-report-words indicating that the status of a channel-path
+ * has changed.
+ */
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+ int overflow)
+{
+ struct chp_id chpid;
+
+ if (overflow) {
+ css_schedule_eval_all();
+ return;
+ }
+ CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+ crw0->erc, crw0->rsid);
+ /*
+ * Check for solicited machine checks. These are
+ * created by resetting a channel path and need not
+ * be handled here.
+ */
+ if (crw0->slct) {
+ CIO_CRW_EVENT(2, "solicited machine check for "
+ "channel path %02X\n", crw0->rsid);
+ return;
+ }
+ chp_id_init(&chpid);
+ chpid.id = crw0->rsid;
+ switch (crw0->erc) {
+ case CRW_ERC_IPARM: /* Path has come. */
+ if (!chp_is_registered(chpid))
+ chp_new(chpid);
+ chsc_chp_online(chpid);
+ break;
+ case CRW_ERC_PERRI: /* Path has gone. */
+ case CRW_ERC_PERRN:
+ chsc_chp_offline(chpid);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+ crw0->erc);
+ }
+}
+
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+ int i;
+ int mask;
+
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (!(ssd->path_mask & mask))
+ continue;
+ if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+ continue;
+ if ((ssd->fla_valid_mask & mask) &&
+ ((ssd->fla[i] & link->fla_mask) != link->fla))
+ continue;
+ return mask;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
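+/*
+ * Worked example: with ssd->path_mask 0xa0 (paths 0 and 2 available),
+ * link->chpid matching ssd->chpid[2] and no fla mismatch, the function
+ * returns 0x20 (0x80 >> 2).
+ */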
+
+static inline int info_bit_num(struct chp_id id)
+{
+ return id.id + id.cssid * (__MAX_CHPID + 1);
+}
+
+/* Force chp_info refresh on next call to info_validate(). */
+static void info_expire(void)
+{
+ mutex_lock(&info_lock);
+ chp_info_expires = jiffies - 1;
+ mutex_unlock(&info_lock);
+}
+
+/* Ensure that chp_info is up-to-date. */
+static int info_update(void)
+{
+ int rc;
+
+ mutex_lock(&info_lock);
+ rc = 0;
+ if (time_after(jiffies, chp_info_expires)) {
+ /* Data is too old, update. */
+ rc = sclp_chp_read_info(&chp_info);
+ chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
+ }
+ mutex_unlock(&info_lock);
+
+ return rc;
+}
+
+/**
+ * chp_info_get_status - retrieve configure status of a channel-path
+ * @chpid: channel-path ID
+ *
+ * On success, return 0 for standby, 1 for configured, 2 for reserved,
+ * 3 for not recognized. Return negative error code on error.
+ */
+int chp_info_get_status(struct chp_id chpid)
+{
+ int rc;
+ int bit;
+
+ rc = info_update();
+ if (rc)
+ return rc;
+
+ bit = info_bit_num(chpid);
+ mutex_lock(&info_lock);
+ if (!chp_test_bit(chp_info.recognized, bit))
+ rc = CHP_STATUS_NOT_RECOGNIZED;
+ else if (chp_test_bit(chp_info.configured, bit))
+ rc = CHP_STATUS_CONFIGURED;
+ else if (chp_test_bit(chp_info.standby, bit))
+ rc = CHP_STATUS_STANDBY;
+ else
+ rc = CHP_STATUS_RESERVED;
+ mutex_unlock(&info_lock);
+
+ return rc;
+}
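+/*
+ * Usage sketch: this is the pattern chp_init() below uses to register all
+ * channel paths the machine knows about:
+ *
+ *    if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+ *        chp_new(chpid);
+ *
+ * A negative return value indicates that the underlying SCLP query failed.
+ */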
+
+/* Return configure task for chpid. */
+static enum cfg_task_t cfg_get_task(struct chp_id chpid)
+{
+ return chp_cfg_task[chpid.cssid][chpid.id];
+}
+
+/* Set configure task for chpid. */
+static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
+{
+ chp_cfg_task[chpid.cssid][chpid.id] = cfg;
+}
+
+/*
+ * Perform one configure/deconfigure request. Reschedule the work function
+ * until the last request has been handled.
+ */
+static void cfg_func(struct work_struct *work)
+{
+ struct chp_id chpid;
+ enum cfg_task_t t;
+ int rc;
+
+ mutex_lock(&cfg_lock);
+ t = cfg_none;
+ chp_id_for_each(&chpid) {
+ t = cfg_get_task(chpid);
+ if (t != cfg_none) {
+ cfg_set_task(chpid, cfg_none);
+ break;
+ }
+ }
+ mutex_unlock(&cfg_lock);
+
+ switch (t) {
+ case cfg_configure:
+ rc = sclp_chp_configure(chpid);
+ if (rc)
+ CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+ "%d\n", chpid.cssid, chpid.id, rc);
+ else {
+ info_expire();
+ chsc_chp_online(chpid);
+ }
+ break;
+ case cfg_deconfigure:
+ rc = sclp_chp_deconfigure(chpid);
+ if (rc)
+ CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+ "%d\n", chpid.cssid, chpid.id, rc);
+ else {
+ info_expire();
+ chsc_chp_offline(chpid);
+ }
+ break;
+ case cfg_none:
+ /* Get updated information after last change. */
+ info_update();
+ mutex_lock(&cfg_lock);
+ cfg_busy = 0;
+ mutex_unlock(&cfg_lock);
+ wake_up_interruptible(&cfg_wait_queue);
+ return;
+ }
+ queue_work(chp_wq, &cfg_work);
+}
+
+/**
+ * chp_cfg_schedule - schedule chpid configuration request
+ * @chpid: channel-path ID
+ * @configure: non-zero for configure, zero for deconfigure
+ *
+ * Schedule a channel-path configuration/deconfiguration request.
+ */
+void chp_cfg_schedule(struct chp_id chpid, int configure)
+{
+ CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
+ configure);
+ mutex_lock(&cfg_lock);
+ cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
+ cfg_busy = 1;
+ mutex_unlock(&cfg_lock);
+ queue_work(chp_wq, &cfg_work);
+}
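+/*
+ * Usage sketch: callers such as the configure sysfs attribute schedule the
+ * request and then wait for the work queue to drain:
+ *
+ *    chp_cfg_schedule(chpid, 1);
+ *    ret = cfg_wait_idle();
+ *
+ * cfg_wait_idle() (below) returns -ERESTARTSYS if interrupted by a signal.
+ */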
+
+/**
+ * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
+ * @chpid: channel-path ID
+ *
+ * Cancel an active channel-path deconfiguration request if it has not yet
+ * been performed.
+ */
+void chp_cfg_cancel_deconfigure(struct chp_id chpid)
+{
+ CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
+ mutex_lock(&cfg_lock);
+ if (cfg_get_task(chpid) == cfg_deconfigure)
+ cfg_set_task(chpid, cfg_none);
+ mutex_unlock(&cfg_lock);
+}
+
+static int cfg_wait_idle(void)
+{
+ if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
+ return -ERESTARTSYS;
+ return 0;
+}
+
+static int __init chp_init(void)
+{
+ struct chp_id chpid;
+ int ret;
+
+ ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
+ if (ret)
+ return ret;
+ chp_wq = create_singlethread_workqueue("cio_chp");
+ if (!chp_wq) {
+ crw_unregister_handler(CRW_RSC_CPATH);
+ return -ENOMEM;
+ }
+ INIT_WORK(&cfg_work, cfg_func);
+ init_waitqueue_head(&cfg_wait_queue);
+ if (info_update())
+ return 0;
+ /* Register available channel-paths. */
+ chp_id_for_each(&chpid) {
+ if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+ chp_new(chpid);
+ }
+
+ return 0;
+}
+
+subsys_initcall(chp_init);
diff --git a/kernel/drivers/s390/cio/chp.h b/kernel/drivers/s390/cio/chp.h
new file mode 100644
index 000000000..4efd5b867
--- /dev/null
+++ b/kernel/drivers/s390/cio/chp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright IBM Corp. 2007, 2010
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_CHP_H
+#define S390_CHP_H S390_CHP_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+#include "css.h"
+
+#define CHP_STATUS_STANDBY 0
+#define CHP_STATUS_CONFIGURED 1
+#define CHP_STATUS_RESERVED 2
+#define CHP_STATUS_NOT_RECOGNIZED 3
+
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+ struct chp_id chpid;
+ u32 fla_mask;
+ u16 fla;
+};
+
+static inline int chp_test_bit(u8 *bitmap, int num)
+{
+ int byte = num >> 3;
+ int mask = 128 >> (num & 7);
+
+ return (bitmap[byte] & mask) ? 1 : 0;
+}
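+/*
+ * Bit numbering example: chp_test_bit(bitmap, 0) tests the most significant
+ * bit of bitmap[0], chp_test_bit(bitmap, 9) tests mask 0x40 in bitmap[1].
+ */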
+
+
+struct channel_path {
+ struct device dev;
+ struct chp_id chpid;
+ struct mutex lock; /* Serialize access to below members. */
+ int state;
+ struct channel_path_desc desc;
+ struct channel_path_desc_fmt1 desc_fmt1;
+ /* Channel-measurement related stuff: */
+ int cmg;
+ int shared;
+ void *cmg_chars;
+};
+
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+ return channel_subsystems[chpid.cssid]->chps[chpid.id];
+}
+
+int chp_get_status(struct chp_id chpid);
+u8 chp_get_sch_opm(struct subchannel *sch);
+int chp_is_registered(struct chp_id chpid);
+struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid);
+void chp_remove_cmg_attr(struct channel_path *chp);
+int chp_add_cmg_attr(struct channel_path *chp);
+int chp_update_desc(struct channel_path *chp);
+int chp_new(struct chp_id chpid);
+void chp_cfg_schedule(struct chp_id chpid, int configure);
+void chp_cfg_cancel_deconfigure(struct chp_id chpid);
+int chp_info_get_status(struct chp_id chpid);
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
+#endif /* S390_CHP_H */
diff --git a/kernel/drivers/s390/cio/chsc.c b/kernel/drivers/s390/cio/chsc.c
new file mode 100644
index 000000000..e3bf885f4
--- /dev/null
+++ b/kernel/drivers/s390/cio/chsc.c
@@ -0,0 +1,1250 @@
+/*
+ * S/390 common I/O routines -- channel subsystem call
+ *
+ * Copyright IBM Corp. 1999,2012
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/crw.h>
+#include <asm/isc.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chp.h"
+#include "chsc.h"
+
+static void *sei_page;
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
+
+/**
+ * chsc_error_from_response() - convert a chsc response to an error
+ * @response: chsc response code
+ *
+ * Returns an appropriate Linux error code for @response.
+ */
+int chsc_error_from_response(int response)
+{
+ switch (response) {
+ case 0x0001:
+ return 0;
+ case 0x0002:
+ case 0x0003:
+ case 0x0006:
+ case 0x0007:
+ case 0x0008:
+ case 0x000a:
+ case 0x0104:
+ return -EINVAL;
+ case 0x0004:
+ return -EOPNOTSUPP;
+ case 0x000b:
+ case 0x0107: /* "Channel busy" for the op 0x003d */
+ return -EBUSY;
+ case 0x0100:
+ case 0x0102:
+ return -ENOMEM;
+ default:
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(chsc_error_from_response);
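+/*
+ * Typical call pattern in this file (chsc_ssqd() below is one instance):
+ * issue the chsc instruction, treat a nonzero condition code as an error,
+ * then translate the response code:
+ *
+ *    if (chsc(area))
+ *        return -EIO;
+ *    return chsc_error_from_response(area->response.code);
+ */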
+
+struct chsc_ssd_area {
+ struct chsc_header request;
+ u16 :10;
+ u16 ssid:2;
+ u16 :4;
+ u16 f_sch; /* first subchannel */
+ u16 :16;
+ u16 l_sch; /* last subchannel */
+ u32 :32;
+ struct chsc_header response;
+ u32 :32;
+ u8 sch_valid : 1;
+ u8 dev_valid : 1;
+ u8 st : 3; /* subchannel type */
+ u8 zeroes : 3;
+ u8 unit_addr; /* unit address */
+ u16 devno; /* device number */
+ u8 path_mask;
+ u8 fla_valid_mask;
+ u16 sch; /* subchannel */
+ u8 chpid[8]; /* chpids 0-7 */
+ u16 fla[8]; /* full link addresses 0-7 */
+} __attribute__ ((packed));
+
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
+{
+ struct chsc_ssd_area *ssd_area;
+ int ccode;
+ int ret;
+ int i;
+ int mask;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ ssd_area = chsc_page;
+ ssd_area->request.length = 0x0010;
+ ssd_area->request.code = 0x0004;
+ ssd_area->ssid = schid.ssid;
+ ssd_area->f_sch = schid.sch_no;
+ ssd_area->l_sch = schid.sch_no;
+
+ ccode = chsc(ssd_area);
+ /* Check response. */
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(ssd_area->response.code);
+ if (ret != 0) {
+ CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ ssd_area->response.code);
+ goto out;
+ }
+ if (!ssd_area->sch_valid) {
+ ret = -ENODEV;
+ goto out;
+ }
+ /* Copy data */
+ ret = 0;
+ memset(ssd, 0, sizeof(struct chsc_ssd_info));
+ if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
+ (ssd_area->st != SUBCHANNEL_TYPE_MSG))
+ goto out;
+ ssd->path_mask = ssd_area->path_mask;
+ ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (ssd_area->path_mask & mask) {
+ chp_id_init(&ssd->chpid[i]);
+ ssd->chpid[i].id = ssd_area->chpid[i];
+ }
+ if (ssd_area->fla_valid_mask & mask)
+ ssd->fla[i] = ssd_area->fla[i];
+ }
+out:
+ spin_unlock_irq(&chsc_page_lock);
+ return ret;
+}
+
+/**
+ * chsc_ssqd() - store subchannel QDIO data (SSQD)
+ * @schid: id of the subchannel on which SSQD is performed
+ * @ssqd: request and response block for SSQD
+ *
+ * Returns 0 on success.
+ */
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
+{
+ memset(ssqd, 0, sizeof(*ssqd));
+ ssqd->request.length = 0x0010;
+ ssqd->request.code = 0x0024;
+ ssqd->first_sch = schid.sch_no;
+ ssqd->last_sch = schid.sch_no;
+ ssqd->ssid = schid.ssid;
+
+ if (chsc(ssqd))
+ return -EIO;
+
+ return chsc_error_from_response(ssqd->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_ssqd);
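+/*
+ * Caller sketch (hypothetical; the qdio layer is the in-kernel user): as
+ * elsewhere in this file, the request/response block is taken from a
+ * GFP_DMA page so the hardware can address it:
+ *
+ *    ssqd = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ *    if (!ssqd)
+ *        return -ENOMEM;
+ *    rc = chsc_ssqd(schid, ssqd);
+ *    if (!rc)
+ *        (copy out ssqd->qdio_ssqd here)
+ *    free_page((unsigned long)ssqd);
+ */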
+
+/**
+ * chsc_sadc() - set adapter device controls (SADC)
+ * @schid: id of the subchannel on which SADC is performed
+ * @scssc: request and response block for SADC
+ * @summary_indicator_addr: summary indicator address
+ * @subchannel_indicator_addr: subchannel indicator address
+ *
+ * Returns 0 on success.
+ */
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+{
+ memset(scssc, 0, sizeof(*scssc));
+ scssc->request.length = 0x0fe0;
+ scssc->request.code = 0x0021;
+ scssc->operation_code = 0;
+
+ scssc->summary_indicator_addr = summary_indicator_addr;
+ scssc->subchannel_indicator_addr = subchannel_indicator_addr;
+
+ scssc->ks = PAGE_DEFAULT_KEY >> 4;
+ scssc->kc = PAGE_DEFAULT_KEY >> 4;
+ scssc->isc = QDIO_AIRQ_ISC;
+ scssc->schid = schid;
+
+ /* enable the time delay disablement facility */
+ if (css_general_characteristics.aif_tdd)
+ scssc->word_with_d_bit = 0x10000000;
+
+ if (chsc(scssc))
+ return -EIO;
+
+ return chsc_error_from_response(scssc->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_sadc);
+
+static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
+{
+ spin_lock_irq(sch->lock);
+ if (sch->driver && sch->driver->chp_event)
+ if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
+ goto out_unreg;
+ spin_unlock_irq(sch->lock);
+ return 0;
+
+out_unreg:
+ sch->lpm = 0;
+ spin_unlock_irq(sch->lock);
+ css_schedule_eval(sch->schid);
+ return 0;
+}
+
+void chsc_chp_offline(struct chp_id chpid)
+{
+ char dbf_txt[15];
+ struct chp_link link;
+
+ sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+
+ if (chp_get_status(chpid) <= 0)
+ return;
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+ for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
+}
+
+static int __s390_process_res_acc(struct subchannel *sch, void *data)
+{
+ spin_lock_irq(sch->lock);
+ if (sch->driver && sch->driver->chp_event)
+ sch->driver->chp_event(sch, data, CHP_ONLINE);
+ spin_unlock_irq(sch->lock);
+
+ return 0;
+}
+
+static void s390_process_res_acc(struct chp_link *link)
+{
+ char dbf_txt[15];
+
+ sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
+ link->chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+ if (link->fla != 0) {
+ sprintf(dbf_txt, "fla%x", link->fla);
+ CIO_TRACE_EVENT(2, dbf_txt);
+ }
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+ /*
+ * I/O resources may have become accessible.
+ * Scan through all subchannels that may be concerned and
+ * do a validation on those.
+ * The more information we have, the less scanning
+ * we will have to do.
+ */
+ for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+ css_schedule_reprobe();
+}
+
+static int
+__get_chpid_from_lir(void *data)
+{
+ struct lir {
+ u8 iq;
+ u8 ic;
+ u16 sci;
+ /* incident-node descriptor */
+ u32 indesc[28];
+ /* attached-node descriptor */
+ u32 andesc[28];
+ /* incident-specific information */
+ u32 isinfo[28];
+ } __attribute__ ((packed)) *lir;
+
+ lir = data;
+ if (!(lir->iq & 0x80))
+ /* NULL link incident record */
+ return -EINVAL;
+ if (!(lir->indesc[0] & 0xc0000000))
+ /* node descriptor not valid */
+ return -EINVAL;
+ if (!(lir->indesc[0] & 0x10000000))
+ /* don't handle device-type nodes - FIXME */
+ return -EINVAL;
+ /* Byte 3 contains the chpid. Could also be CTCA, but we don't care. */
+
+ return (u16) (lir->indesc[0] & 0x000000ff);
+}
+
+struct chsc_sei_nt0_area {
+ u8 flags;
+ u8 vf; /* validity flags */
+ u8 rs; /* reporting source */
+ u8 cc; /* content code */
+ u16 fla; /* full link address */
+ u16 rsid; /* reporting source id */
+ u32 reserved1;
+ u32 reserved2;
+ /* ccdf has to be big enough for a link-incident record */
+ u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+ u8 flags; /* p and v bit */
+ u8 reserved1;
+ u8 reserved2;
+ u8 cc; /* content code */
+ u32 reserved3[13];
+ u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0 (1ULL << 63)
+#define CHSC_SEI_NT2 (1ULL << 61)
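+/*
+ * The ntsm encodes notification type n as bit 63 - n, so NT0 is bit 63
+ * and NT2 is bit 61.
+ */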
+
+struct chsc_sei {
+ struct chsc_header request;
+ u32 reserved1;
+ u64 ntsm; /* notification type mask */
+ struct chsc_header response;
+ u32 :24;
+ u8 nt;
+ union {
+ struct chsc_sei_nt0_area nt0_area;
+ struct chsc_sei_nt2_area nt2_area;
+ u8 nt_area[PAGE_SIZE - 24];
+ } u;
+} __packed;
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
+{
+ struct chp_id chpid;
+ int id;
+
+ CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
+ sei_area->rs, sei_area->rsid);
+ if (sei_area->rs != 4)
+ return;
+ id = __get_chpid_from_lir(sei_area->ccdf);
+ if (id < 0)
+ CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
+ else {
+ chp_id_init(&chpid);
+ chpid.id = id;
+ chsc_chp_offline(chpid);
+ }
+}
+
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+{
+ struct chp_link link;
+ struct chp_id chpid;
+ int status;
+
+ CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
+ "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
+ if (sei_area->rs != 4)
+ return;
+ chp_id_init(&chpid);
+ chpid.id = sei_area->rsid;
+ /* allocate a new channel path structure, if needed */
+ status = chp_get_status(chpid);
+ if (status < 0)
+ chp_new(chpid);
+ else if (!status)
+ return;
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ if ((sei_area->vf & 0xc0) != 0) {
+ link.fla = sei_area->fla;
+ if ((sei_area->vf & 0xc0) == 0xc0)
+ /* full link address */
+ link.fla_mask = 0xffff;
+ else
+ /* link address */
+ link.fla_mask = 0xff00;
+ }
+ s390_process_res_acc(&link);
+}
+
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+ u8 *data;
+ int num;
+
+ CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+ if (sei_area->rs != 0)
+ return;
+ data = sei_area->ccdf;
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data, num))
+ continue;
+ chpid.id = num;
+
+ CIO_CRW_EVENT(4, "Update information for channel path "
+ "%x.%02x\n", chpid.cssid, chpid.id);
+ chp = chpid_to_chp(chpid);
+ if (!chp) {
+ chp_new(chpid);
+ continue;
+ }
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
+}
+
+struct chp_config_data {
+ u8 map[32];
+ u8 op;
+ u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
+{
+ struct chp_config_data *data;
+ struct chp_id chpid;
+ int num;
+ char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
+
+ CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+ if (sei_area->rs != 0)
+ return;
+ data = (struct chp_config_data *) &(sei_area->ccdf);
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data->map, num))
+ continue;
+ chpid.id = num;
+ pr_notice("Processing %s for channel path %x.%02x\n",
+ events[data->op], chpid.cssid, chpid.id);
+ switch (data->op) {
+ case 0:
+ chp_cfg_schedule(chpid, 1);
+ break;
+ case 1:
+ chp_cfg_schedule(chpid, 0);
+ break;
+ case 2:
+ chp_cfg_cancel_deconfigure(chpid);
+ break;
+ }
+ }
+}
+
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm change notification\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_update_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: updating change notification"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm available information\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_process_availability_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: process availability information"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
+{
+ switch (sei_area->cc) {
+ case 1:
+ zpci_event_error(sei_area->ccdf);
+ break;
+ case 2:
+ zpci_event_availability(sei_area->ccdf);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
+ sei_area->cc);
+ break;
+ }
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
+ /* which kind of information was stored? */
+ switch (sei_area->cc) {
+ case 1: /* link incident*/
+ chsc_process_sei_link_incident(sei_area);
+ break;
+ case 2: /* i/o resource accessibility */
+ chsc_process_sei_res_acc(sei_area);
+ break;
+ case 7: /* channel-path-availability information */
+ chsc_process_sei_chp_avail(sei_area);
+ break;
+ case 8: /* channel-path-configuration notification */
+ chsc_process_sei_chp_config(sei_area);
+ break;
+ case 12: /* scm change notification */
+ chsc_process_sei_scm_change(sei_area);
+ break;
+ case 14: /* scm available notification */
+ chsc_process_sei_scm_avail(sei_area);
+ break;
+ default: /* other stuff */
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
+ sei_area->cc);
+ break;
+ }
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
+}
+
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+{
+ static int ntsm_unsupported;
+
+ while (true) {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code != 0x0001) {
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
+ break;
+ }
+
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
+}
+
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
+static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+ struct chsc_sei *sei = sei_page;
+
+ if (overflow) {
+ css_schedule_eval_all();
+ return;
+ }
+ CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+ crw0->erc, crw0->rsid);
+
+ CIO_TRACE_EVENT(2, "prcss");
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
+}
+
+void chsc_chp_online(struct chp_id chpid)
+{
+ char dbf_txt[15];
+ struct chp_link link;
+
+ sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+
+ if (chp_get_status(chpid) != 0) {
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+ for_each_subchannel_staged(__s390_process_res_acc, NULL,
+ &link);
+ css_schedule_reprobe();
+ }
+}
+
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+ struct chp_id chpid, int on)
+{
+ unsigned long flags;
+ struct chp_link link;
+
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ spin_lock_irqsave(sch->lock, flags);
+ if (sch->driver && sch->driver->chp_event)
+ sch->driver->chp_event(sch, &link,
+ on ? CHP_VARY_ON : CHP_VARY_OFF);
+ spin_unlock_irqrestore(sch->lock, flags);
+}
+
+static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
+{
+ struct chp_id *chpid = data;
+
+ __s390_subchannel_vary_chpid(sch, *chpid, 0);
+ return 0;
+}
+
+static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
+{
+ struct chp_id *chpid = data;
+
+ __s390_subchannel_vary_chpid(sch, *chpid, 1);
+ return 0;
+}
+
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
+ */
+int chsc_chp_vary(struct chp_id chpid, int on)
+{
+ struct channel_path *chp = chpid_to_chp(chpid);
+
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+ /*
+ * Redo path verification on the devices the chpid connects to.
+ */
+ if (on) {
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
+ for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
+ NULL, &chpid);
+ css_schedule_reprobe();
+ } else
+ for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
+ NULL, &chpid);
+
+ return 0;
+}
+
+static void
+chsc_remove_cmg_attr(struct channel_subsystem *css)
+{
+ int i;
+
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ if (!css->chps[i])
+ continue;
+ chp_remove_cmg_attr(css->chps[i]);
+ }
+}
+
+static int
+chsc_add_cmg_attr(struct channel_subsystem *css)
+{
+ int i, ret;
+
+ ret = 0;
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ if (!css->chps[i])
+ continue;
+ ret = chp_add_cmg_attr(css->chps[i]);
+ if (ret)
+ goto cleanup;
+ }
+ return ret;
+cleanup:
+ for (--i; i >= 0; i--) {
+ if (!css->chps[i])
+ continue;
+ chp_remove_cmg_attr(css->chps[i]);
+ }
+ return ret;
+}
+
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
+{
+ struct {
+ struct chsc_header request;
+ u32 operation_code : 2;
+ u32 : 30;
+ u32 key : 4;
+ u32 : 28;
+ u32 zeroes1;
+ u32 cub_addr1;
+ u32 zeroes2;
+ u32 cub_addr2;
+ u32 reserved[13];
+ struct chsc_header response;
+ u32 status : 8;
+ u32 : 4;
+ u32 fmt : 4;
+ u32 : 16;
+ } __attribute__ ((packed)) *secm_area;
+ int ret, ccode;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ secm_area = chsc_page;
+ secm_area->request.length = 0x0050;
+ secm_area->request.code = 0x0016;
+
+ secm_area->key = PAGE_DEFAULT_KEY >> 4;
+ secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
+ secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
+
+ secm_area->operation_code = enable ? 0 : 1;
+
+ ccode = chsc(secm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+
+ switch (secm_area->response.code) {
+ case 0x0102:
+ case 0x0103:
+ ret = -EINVAL;
+ break;
+ default:
+ ret = chsc_error_from_response(secm_area->response.code);
+ }
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
+ secm_area->response.code);
+out:
+ spin_unlock_irq(&chsc_page_lock);
+ return ret;
+}
+
+int
+chsc_secm(struct channel_subsystem *css, int enable)
+{
+ int ret;
+
+ if (enable && !css->cm_enabled) {
+ css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!css->cub_addr1 || !css->cub_addr2) {
+ free_page((unsigned long)css->cub_addr1);
+ free_page((unsigned long)css->cub_addr2);
+ return -ENOMEM;
+ }
+ }
+ ret = __chsc_do_secm(css, enable);
+ if (!ret) {
+ css->cm_enabled = enable;
+ if (css->cm_enabled) {
+ ret = chsc_add_cmg_attr(css);
+ if (ret) {
+ __chsc_do_secm(css, 0);
+ css->cm_enabled = 0;
+ }
+ } else
+ chsc_remove_cmg_attr(css);
+ }
+ if (!css->cm_enabled) {
+ free_page((unsigned long)css->cub_addr1);
+ free_page((unsigned long)css->cub_addr2);
+ }
+ return ret;
+}
+
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+ int c, int m, void *page)
+{
+ struct chsc_scpd *scpd_area;
+ int ccode, ret;
+
+ if ((rfmt == 1) && !css_general_characteristics.fcs)
+ return -EINVAL;
+ if ((rfmt == 2) && !css_general_characteristics.cib)
+ return -EINVAL;
+
+ memset(page, 0, PAGE_SIZE);
+ scpd_area = page;
+ scpd_area->request.length = 0x0010;
+ scpd_area->request.code = 0x0002;
+ scpd_area->cssid = chpid.cssid;
+ scpd_area->first_chpid = chpid.id;
+ scpd_area->last_chpid = chpid.id;
+ scpd_area->m = m;
+ scpd_area->c = c;
+ scpd_area->fmt = fmt;
+ scpd_area->rfmt = rfmt;
+
+ ccode = chsc(scpd_area);
+ if (ccode > 0)
+ return (ccode == 3) ? -ENODEV : -EBUSY;
+
+ ret = chsc_error_from_response(scpd_area->response.code);
+ if (ret)
+ CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
+ scpd_area->response.code);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
+
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc *desc)
+{
+ struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
+ if (ret)
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc)
+{
+ struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
+ if (ret)
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+static void
+chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
+ struct cmg_chars *chars)
+{
+ struct cmg_chars *cmg_chars;
+ int i, mask;
+
+ cmg_chars = chp->cmg_chars;
+ for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+ mask = 0x80 >> (i + 3);
+ if (cmcv & mask)
+ cmg_chars->values[i] = chars->values[i];
+ else
+ cmg_chars->values[i] = 0;
+ }
+}
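+/*
+ * Example: the mask 0x80 >> (i + 3) maps cmcv bit 3 (0x10) to values[0],
+ * bit 4 (0x08) to values[1], and so on up to bit 7 (0x01) for values[4];
+ * characteristics not flagged in cmcv read as zero.
+ */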
+
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
+{
+ struct cmg_chars *cmg_chars;
+ int ccode, ret;
+
+ struct {
+ struct chsc_header request;
+ u32 : 24;
+ u32 first_chpid : 8;
+ u32 : 24;
+ u32 last_chpid : 8;
+ u32 zeroes1;
+ struct chsc_header response;
+ u32 zeroes2;
+ u32 not_valid : 1;
+ u32 shared : 1;
+ u32 : 22;
+ u32 chpid : 8;
+ u32 cmcv : 5;
+ u32 : 11;
+ u32 cmgq : 8;
+ u32 cmg : 8;
+ u32 zeroes3;
+ u32 data[NR_MEASUREMENT_CHARS];
+ } __attribute__ ((packed)) *scmc_area;
+
+ chp->cmg_chars = NULL;
+ cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+ if (!cmg_chars)
+ return -ENOMEM;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
+ scmc_area->request.length = 0x0010;
+ scmc_area->request.code = 0x0022;
+ scmc_area->first_chpid = chp->chpid.id;
+ scmc_area->last_chpid = chp->chpid.id;
+
+ ccode = chsc(scmc_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+
+ ret = chsc_error_from_response(scmc_area->response.code);
+ if (ret) {
+ CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
+ scmc_area->response.code);
+ goto out;
+ }
+ if (scmc_area->not_valid) {
+ chp->cmg = -1;
+ chp->shared = -1;
+ goto out;
+ }
+ chp->cmg = scmc_area->cmg;
+ chp->shared = scmc_area->shared;
+ if (chp->cmg != 2 && chp->cmg != 3) {
+ /* No cmg-dependent data. */
+ goto out;
+ }
+ chp->cmg_chars = cmg_chars;
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+ (struct cmg_chars *) &scmc_area->data);
+out:
+ spin_unlock_irq(&chsc_page_lock);
+ if (!chp->cmg_chars)
+ kfree(cmg_chars);
+
+ return ret;
+}
+
+int __init chsc_init(void)
+{
+ int ret;
+
+ sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sei_page || !chsc_page) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
+ if (ret)
+ goto out_err;
+ return ret;
+out_err:
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
+ return ret;
+}
+
+void __init chsc_init_cleanup(void)
+{
+ crw_unregister_handler(CRW_RSC_CSS);
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
+}
+
+int chsc_enable_facility(int operation_code)
+{
+ unsigned long flags;
+ int ret;
+ struct {
+ struct chsc_header request;
+ u8 reserved1:4;
+ u8 format:4;
+ u8 reserved2;
+ u16 operation_code;
+ u32 reserved3;
+ u32 reserved4;
+ u32 operation_data_area[252];
+ struct chsc_header response;
+ u32 reserved5:4;
+ u32 format2:4;
+ u32 reserved6:24;
+ } __attribute__ ((packed)) *sda_area;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sda_area = chsc_page;
+ sda_area->request.length = 0x0400;
+ sda_area->request.code = 0x0031;
+ sda_area->operation_code = operation_code;
+
+ ret = chsc(sda_area);
+ if (ret > 0) {
+ ret = (ret == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+
+ switch (sda_area->response.code) {
+ case 0x0101:
+ ret = -EOPNOTSUPP;
+ break;
+ default:
+ ret = chsc_error_from_response(sda_area->response.code);
+ }
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
+ operation_code, sda_area->response.code);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+struct css_general_char css_general_characteristics;
+struct css_chsc_char css_chsc_characteristics;
+
+int __init
+chsc_determine_css_characteristics(void)
+{
+ int result;
+ struct {
+ struct chsc_header request;
+ u32 reserved1;
+ u32 reserved2;
+ u32 reserved3;
+ struct chsc_header response;
+ u32 reserved4;
+ u32 general_char[510];
+ u32 chsc_char[508];
+ } __attribute__ ((packed)) *scsc_area;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scsc_area = chsc_page;
+ scsc_area->request.length = 0x0010;
+ scsc_area->request.code = 0x0010;
+
+ result = chsc(scsc_area);
+ if (result) {
+ result = (result == 3) ? -ENODEV : -EBUSY;
+ goto exit;
+ }
+
+ result = chsc_error_from_response(scsc_area->response.code);
+ if (result == 0) {
+ memcpy(&css_general_characteristics, scsc_area->general_char,
+ sizeof(css_general_characteristics));
+ memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
+ sizeof(css_chsc_characteristics));
+ } else
+ CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
+ scsc_area->response.code);
+exit:
+ spin_unlock_irq(&chsc_page_lock);
+ return result;
+}
+
+EXPORT_SYMBOL_GPL(css_general_characteristics);
+EXPORT_SYMBOL_GPL(css_chsc_characteristics);
+
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
+{
+ struct {
+ struct chsc_header request;
+ unsigned int rsvd0;
+ unsigned int op : 8;
+ unsigned int rsvd1 : 8;
+ unsigned int ctrl : 16;
+ unsigned int rsvd2[5];
+ struct chsc_header response;
+ unsigned int rsvd3[7];
+ } __attribute__ ((packed)) *rr;
+ int rc;
+
+ memset(page, 0, PAGE_SIZE);
+ rr = page;
+ rr->request.length = 0x0020;
+ rr->request.code = 0x0033;
+ rr->op = op;
+ rr->ctrl = ctrl;
+ rc = chsc(rr);
+ if (rc)
+ return -EIO;
+ rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+ return rc;
+}
+
+int chsc_sstpi(void *page, void *result, size_t size)
+{
+ struct {
+ struct chsc_header request;
+ unsigned int rsvd0[3];
+ struct chsc_header response;
+ char data[size];
+ } __attribute__ ((packed)) *rr;
+ int rc;
+
+ memset(page, 0, PAGE_SIZE);
+ rr = page;
+ rr->request.length = 0x0010;
+ rr->request.code = 0x0038;
+ rc = chsc(rr);
+ if (rc)
+ return -EIO;
+ memcpy(result, &rr->data, size);
+ return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
+
+int chsc_siosl(struct subchannel_id schid)
+{
+ struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+ } __attribute__ ((packed)) *siosl_area;
+ unsigned long flags;
+ int ccode;
+ int rc;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ siosl_area = chsc_page;
+ siosl_area->request.length = 0x0010;
+ siosl_area->request.code = 0x0046;
+ siosl_area->word1 = 0x80000000;
+ siosl_area->sid = schid;
+
+ ccode = chsc(siosl_area);
+ if (ccode > 0) {
+ if (ccode == 3)
+ rc = -ENODEV;
+ else
+ rc = -EBUSY;
+ CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+ schid.ssid, schid.sch_no, ccode);
+ goto out;
+ }
+ rc = chsc_error_from_response(siosl_area->response.code);
+ if (rc)
+ CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ siosl_area->response.code);
+ else
+ CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
+
+/**
+ * chsc_scm_info() - store SCM information (SSI)
+ * @scm_area: request and response block for SSI
+ * @token: continuation token
+ *
+ * Returns 0 on success.
+ */
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
+{
+ int ccode, ret;
+
+ memset(scm_area, 0, sizeof(*scm_area));
+ scm_area->request.length = 0x0020;
+ scm_area->request.code = 0x004C;
+ scm_area->reqtok = token;
+
+ ccode = chsc(scm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(scm_area->response.code);
+ if (ret != 0)
+ CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
+ scm_area->response.code);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scm_info);
+
+/**
+ * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * @schid: id of the subchannel on which PNSO is performed
+ * @brinfo_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc)
+{
+ memset(brinfo_area, 0, sizeof(*brinfo_area));
+ brinfo_area->request.length = 0x0030;
+ brinfo_area->request.code = 0x003d; /* network-subchannel operation */
+ brinfo_area->m = schid.m;
+ brinfo_area->ssid = schid.ssid;
+ brinfo_area->sch = schid.sch_no;
+ brinfo_area->cssid = schid.cssid;
+ brinfo_area->oc = 0; /* Store-network-bridging-information list */
+ brinfo_area->resume_token = resume_token;
+ brinfo_area->n = (cnc != 0);
+ if (chsc(brinfo_area))
+ return -EIO;
+ return chsc_error_from_response(brinfo_area->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
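+/*
+ * Caller sketch (hypothetical): for multiblock responses, feed the resume
+ * token from the response header back into the next call until the response
+ * no longer carries one:
+ *
+ *    struct chsc_brinfo_resume_token token = { };
+ *
+ *    do {
+ *        rc = chsc_pnso_brinfo(schid, area, token, 0);
+ *        if (rc)
+ *            break;
+ *        (process area->entries here)
+ *        token = area->naihdr.resume_token;
+ *    } while (token.t1 || token.t2);
+ */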
diff --git a/kernel/drivers/s390/cio/chsc.h b/kernel/drivers/s390/cio/chsc.h
new file mode 100644
index 000000000..76c9b5070
--- /dev/null
+++ b/kernel/drivers/s390/cio/chsc.h
@@ -0,0 +1,238 @@
+#ifndef S390_CHSC_H
+#define S390_CHSC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/css_chars.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/schid.h>
+#include <asm/qdio.h>
+
+#define CHSC_SDA_OC_MSS 0x2
+
+#define NR_MEASUREMENT_CHARS 5
+struct cmg_chars {
+ u32 values[NR_MEASUREMENT_CHARS];
+} __attribute__ ((packed));
+
+#define NR_MEASUREMENT_ENTRIES 8
+struct cmg_entry {
+ u32 values[NR_MEASUREMENT_ENTRIES];
+} __attribute__ ((packed));
+
+struct channel_path_desc_fmt1 {
+ u8 flags;
+ u8 lsn;
+ u8 desc;
+ u8 chpid;
+ u32:24;
+ u8 chpp;
+ u32 unused[2];
+ u16 chid;
+ u32:16;
+ u16 mdc;
+ u16:13;
+ u8 r:1;
+ u8 s:1;
+ u8 f:1;
+ u32 zeros[2];
+} __attribute__ ((packed));
+
+struct channel_path;
+
+struct css_chsc_char {
+ u64 res;
+ u64 : 20;
+ u32 secm : 1; /* bit 84 */
+ u32 : 1;
+ u32 scmc : 1; /* bit 86 */
+ u32 : 20;
+ u32 scssc : 1; /* bit 107 */
+ u32 scsscf : 1; /* bit 108 */
+ u32:7;
+ u32 pnso:1; /* bit 116 */
+ u32:11;
+} __attribute__ ((packed));
+
+extern struct css_chsc_char css_chsc_characteristics;
+
+struct chsc_ssd_info {
+ u8 path_mask;
+ u8 fla_valid_mask;
+ struct chp_id chpid[8];
+ u16 fla[8];
+};
+
+struct chsc_ssqd_area {
+ struct chsc_header request;
+ u16:10;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 first_sch;
+ u16:16;
+ u16 last_sch;
+ u32:32;
+ struct chsc_header response;
+ u32:32;
+ struct qdio_ssqd_desc qdio_ssqd;
+} __packed;
+
+struct chsc_scssc_area {
+ struct chsc_header request;
+ u16 operation_code;
+ u16:16;
+ u32:32;
+ u32:32;
+ u64 summary_indicator_addr;
+ u64 subchannel_indicator_addr;
+ u32 ks:4;
+ u32 kc:4;
+ u32:21;
+ u32 isc:3;
+ u32 word_with_d_bit;
+ u32:32;
+ struct subchannel_id schid;
+ u32 reserved[1004];
+ struct chsc_header response;
+ u32:32;
+} __packed;
+
+struct chsc_scpd {
+ struct chsc_header request;
+ u32:2;
+ u32 m:1;
+ u32 c:1;
+ u32 fmt:4;
+ u32 cssid:8;
+ u32:4;
+ u32 rfmt:4;
+ u32 first_chpid:8;
+ u32:24;
+ u32 last_chpid:8;
+ u32 zeroes1;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+} __attribute__ ((packed));
+
+
+extern int chsc_get_ssd_info(struct subchannel_id schid,
+ struct chsc_ssd_info *ssd);
+extern int chsc_determine_css_characteristics(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
+
+extern int chsc_enable_facility(int);
+struct channel_subsystem;
+extern int chsc_secm(struct channel_subsystem *, int);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
+
+int chsc_chp_vary(struct chp_id chpid, int on);
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+ int c, int m, void *page);
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc);
+void chsc_chp_online(struct chp_id chpid);
+void chsc_chp_offline(struct chp_id chpid);
+int chsc_get_channel_measurement_chars(struct channel_path *chp);
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+int chsc_error_from_response(int response);
+
+int chsc_siosl(struct subchannel_id schid);
+
+/* Functions and definitions to query storage-class memory. */
+struct sale {
+ u64 sa;
+ u32 p:4;
+ u32 op_state:4;
+ u32 data_state:4;
+ u32 rank:4;
+ u32 r:1;
+ u32:7;
+ u32 rid:8;
+ u32:32;
+} __packed;
+
+struct chsc_scm_info {
+ struct chsc_header request;
+ u32:32;
+ u64 reqtok;
+ u32 reserved1[4];
+ struct chsc_header response;
+ u64:56;
+ u8 rq;
+ u32 mbc;
+ u64 msa;
+ u16 is;
+ u16 mmc;
+ u32 mci;
+ u64 nr_scm_ini;
+ u64 nr_scm_unini;
+ u32 reserved2[10];
+ u64 restok;
+ struct sale scmal[248];
+} __packed;
+
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+
+struct chsc_brinfo_resume_token {
+ u64 t1;
+ u64 t2;
+} __packed;
+
+struct chsc_brinfo_naihdr {
+ struct chsc_brinfo_resume_token resume_token;
+ u32:32;
+ u32 instance;
+ u32:24;
+ u8 naids;
+ u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+ struct chsc_header request;
+ u8:2;
+ u8 m:1;
+ u8:5;
+ u8:2;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 sch;
+ u8:8;
+ u8 cssid;
+ u16:16;
+ u8 oc;
+ u32:24;
+ struct chsc_brinfo_resume_token resume_token;
+ u32 n:1;
+ u32:31;
+ u32 reserved[3];
+ struct chsc_header response;
+ u32:32;
+ struct chsc_brinfo_naihdr naihdr;
+ union {
+ struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
+ struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
+ struct qdio_brinfo_entry_l2 l2[0];
+ } entries;
+} __packed;
+
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc);
+
+#ifdef CONFIG_SCM_BUS
+int scm_update_information(void);
+int scm_process_availability_information(void);
+#else /* CONFIG_SCM_BUS */
+static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
+#endif /* CONFIG_SCM_BUS */
+
+
+#endif /* S390_CHSC_H */
diff --git a/kernel/drivers/s390/cio/chsc_sch.c b/kernel/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 000000000..213159dec
--- /dev/null
+++ b/kernel/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,1017 @@
+/*
+ * Driver for s390 chsc subchannels
+ *
+ * Copyright IBM Corp. 2008, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/compat.h>
+#include <asm/cio.h>
+#include <asm/chsc.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc_sch.h"
+#include "ioasm.h"
+
+static debug_info_t *chsc_debug_msg_id;
+static debug_info_t *chsc_debug_log_id;
+
+static struct chsc_request *on_close_request;
+static struct chsc_async_area *on_close_chsc_area;
+static DEFINE_MUTEX(on_close_mutex);
+
+#define CHSC_MSG(imp, args...) do { \
+ debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \
+ } while (0)
+
+#define CHSC_LOG(imp, txt) do { \
+ debug_text_event(chsc_debug_log_id, imp , txt); \
+ } while (0)
+
+static void CHSC_LOG_HEX(int level, void *data, int length)
+{
+ while (length > 0) {
+ debug_event(chsc_debug_log_id, level, data, length);
+ length -= chsc_debug_log_id->buf_size;
+ data += chsc_debug_log_id->buf_size;
+ }
+}
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("driver for s390 chsc subchannels");
+MODULE_LICENSE("GPL");
+
+static void chsc_subchannel_irq(struct subchannel *sch)
+{
+ struct chsc_private *private = dev_get_drvdata(&sch->dev);
+ struct chsc_request *request = private->request;
+ struct irb *irb = this_cpu_ptr(&cio_irb);
+
+ CHSC_LOG(4, "irb");
+ CHSC_LOG_HEX(4, irb, sizeof(*irb));
+ inc_irq_stat(IRQIO_CSC);
+
+ /* Copy irb to provided request and set done. */
+ if (!request) {
+ CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return;
+ }
+ private->request = NULL;
+ memcpy(&request->irb, irb, sizeof(*irb));
+ cio_update_schib(sch);
+ complete(&request->completion);
+ put_device(&sch->dev);
+}
+
+static int chsc_subchannel_probe(struct subchannel *sch)
+{
+ struct chsc_private *private;
+ int ret;
+
+ CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ sch->isc = CHSC_SCH_ISC;
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+ dev_set_drvdata(&sch->dev, private);
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret) {
+ CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ dev_set_drvdata(&sch->dev, NULL);
+ kfree(private);
+ } else {
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+ }
+ return ret;
+}
+
+static int chsc_subchannel_remove(struct subchannel *sch)
+{
+ struct chsc_private *private;
+
+ cio_disable_subchannel(sch);
+ private = dev_get_drvdata(&sch->dev);
+ dev_set_drvdata(&sch->dev, NULL);
+ if (private->request) {
+ complete(&private->request->completion);
+ put_device(&sch->dev);
+ }
+ kfree(private);
+ return 0;
+}
+
+static void chsc_subchannel_shutdown(struct subchannel *sch)
+{
+ cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_prepare(struct subchannel *sch)
+{
+ int cc;
+ struct schib schib;
+ /*
+ * Don't allow suspend while the subchannel is not idle
+ * since we don't have a way to clear the subchannel and
+ * cannot disable it with a request running.
+ */
+ cc = stsch_err(sch->schid, &schib);
+ if (!cc && scsw_stctl(&schib.scsw))
+ return -EAGAIN;
+ return 0;
+}
+
+static int chsc_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+static struct css_device_id chsc_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
+
+static struct css_driver chsc_subchannel_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "chsc_subchannel",
+ },
+ .subchannel_type = chsc_subchannel_ids,
+ .irq = chsc_subchannel_irq,
+ .probe = chsc_subchannel_probe,
+ .remove = chsc_subchannel_remove,
+ .shutdown = chsc_subchannel_shutdown,
+ .prepare = chsc_subchannel_prepare,
+ .freeze = chsc_subchannel_freeze,
+ .thaw = chsc_subchannel_restore,
+ .restore = chsc_subchannel_restore,
+};
+
+static int __init chsc_init_dbfs(void)
+{
+ chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
+ if (!chsc_debug_msg_id)
+ goto out;
+ debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(chsc_debug_msg_id, 2);
+ chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
+ if (!chsc_debug_log_id)
+ goto out;
+ debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
+ debug_set_level(chsc_debug_log_id, 2);
+ return 0;
+out:
+ if (chsc_debug_msg_id)
+ debug_unregister(chsc_debug_msg_id);
+ return -ENOMEM;
+}
+
+static void chsc_remove_dbfs(void)
+{
+ debug_unregister(chsc_debug_log_id);
+ debug_unregister(chsc_debug_msg_id);
+}
+
+static int __init chsc_init_sch_driver(void)
+{
+ return css_driver_register(&chsc_subchannel_driver);
+}
+
+static void chsc_cleanup_sch_driver(void)
+{
+ css_driver_unregister(&chsc_subchannel_driver);
+}
+
+static DEFINE_SPINLOCK(chsc_lock);
+
+static int chsc_subchannel_match_next_free(struct device *dev, void *data)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
+}
+
+static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
+{
+ struct device *dev;
+
+ dev = driver_find_device(&chsc_subchannel_driver.drv,
+ sch ? &sch->dev : NULL, NULL,
+ chsc_subchannel_match_next_free);
+ return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * chsc_async() - try to start a chsc request asynchronously
+ * @chsc_area: request to be started
+ * @request: request structure to associate
+ *
+ * Tries to start a chsc request on one of the existing chsc subchannels.
+ * Returns:
+ * %0 if the request was performed synchronously
+ * %-EINPROGRESS if the request was successfully started
+ * %-EBUSY if all chsc subchannels are busy
+ * %-ENODEV if no chsc subchannels are available
+ * Context:
+ * interrupts disabled, chsc_lock held
+ */
+static int chsc_async(struct chsc_async_area *chsc_area,
+ struct chsc_request *request)
+{
+ int cc;
+ struct chsc_private *private;
+ struct subchannel *sch = NULL;
+ int ret = -ENODEV;
+ char dbf[10];
+
+ chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
+ while ((sch = chsc_get_next_subchannel(sch))) {
+ spin_lock(sch->lock);
+ private = dev_get_drvdata(&sch->dev);
+ if (private->request) {
+ spin_unlock(sch->lock);
+ ret = -EBUSY;
+ continue;
+ }
+ chsc_area->header.sid = sch->schid;
+ CHSC_LOG(2, "schid");
+ CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
+ cc = chsc(chsc_area);
+ snprintf(dbf, sizeof(dbf), "cc:%d", cc);
+ CHSC_LOG(2, dbf);
+ switch (cc) {
+ case 0:
+ ret = 0;
+ break;
+ case 1:
+ sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
+ ret = -EINPROGRESS;
+ private->request = request;
+ break;
+ case 2:
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ spin_unlock(sch->lock);
+ CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, cc);
+ if (ret == -EINPROGRESS)
+ return -EINPROGRESS;
+ put_device(&sch->dev);
+ if (ret == 0)
+ return 0;
+ }
+ return ret;
+}
+
+static void chsc_log_command(void *chsc_area)
+{
+ char dbf[10];
+
+ snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
+ CHSC_LOG(0, dbf);
+ CHSC_LOG_HEX(0, chsc_area, 32);
+}
+
+static int chsc_examine_irb(struct chsc_request *request)
+{
+ int backed_up;
+
+ if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
+ return -EIO;
+ backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
+ request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
+ if (scsw_cstat(&request->irb.scsw) == 0)
+ return 0;
+ if (!backed_up)
+ return 0;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
+ return -EIO;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
+ return -EPERM;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
+ return -EAGAIN;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
+ return -EAGAIN;
+ return -EIO;
+}
+
+static int chsc_ioctl_start(void __user *user_area)
+{
+ struct chsc_request *request;
+ struct chsc_async_area *chsc_area;
+ int ret;
+ char dbf[10];
+
+ if (!css_general_characteristics.dynio)
+ /* It makes no sense to try. */
+ return -EOPNOTSUPP;
+ chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ if (!chsc_area)
+ return -ENOMEM;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ init_completion(&request->completion);
+ if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ chsc_log_command(chsc_area);
+ spin_lock_irq(&chsc_lock);
+ ret = chsc_async(chsc_area, request);
+ spin_unlock_irq(&chsc_lock);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&request->completion);
+ ret = chsc_examine_irb(request);
+ }
+ /* copy area back to user */
+ if (!ret)
+ if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+ ret = -EFAULT;
+out_free:
+ snprintf(dbf, sizeof(dbf), "ret:%d", ret);
+ CHSC_LOG(0, dbf);
+ kfree(request);
+ free_page((unsigned long)chsc_area);
+ return ret;
+}
+
+static int chsc_ioctl_on_close_set(void __user *user_area)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (on_close_chsc_area) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
+ if (!on_close_request) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ if (!on_close_chsc_area) {
+ ret = -ENOMEM;
+ goto out_free_request;
+ }
+ if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_chsc;
+ }
+ ret = 0;
+ goto out_unlock;
+
+out_free_chsc:
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+out_free_request:
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
+static int chsc_ioctl_on_close_remove(void)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+ ret = 0;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
+static int chsc_ioctl_start_sync(void __user *user_area)
+{
+ struct chsc_sync_area *chsc_area;
+ int ret, ccode;
+
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!chsc_area)
+ return -ENOMEM;
+ if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ if (chsc_area->header.code & 0x4000) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ chsc_log_command(chsc_area);
+ ccode = chsc(chsc_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ free_page((unsigned long)chsc_area);
+ return ret;
+}
+
+static int chsc_ioctl_info_channel_path(void __user *user_cd)
+{
+ struct chsc_chp_cd *cd;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 1;
+ u32 fmt1 : 4;
+ u32 cssid : 8;
+ u32 : 8;
+ u32 first_chpid : 8;
+ u32 : 24;
+ u32 last_chpid : 8;
+ u32 : 32;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *scpcd_area;
+
+ scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scpcd_area)
+ return -ENOMEM;
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ scpcd_area->request.length = 0x0010;
+ scpcd_area->request.code = 0x0028;
+ scpcd_area->m = cd->m;
+ scpcd_area->fmt1 = cd->fmt;
+ scpcd_area->cssid = cd->chpid.cssid;
+ scpcd_area->first_chpid = cd->chpid.id;
+ scpcd_area->last_chpid = cd->chpid.id;
+
+ ccode = chsc(scpcd_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (scpcd_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "scpcd: response code=%x\n",
+ scpcd_area->response.code);
+ goto out_free;
+ }
+ memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
+ if (copy_to_user(user_cd, cd, sizeof(*cd)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(cd);
+ free_page((unsigned long)scpcd_area);
+ return ret;
+}
+
+static int chsc_ioctl_info_cu(void __user *user_cd)
+{
+ struct chsc_cu_cd *cd;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 1;
+ u32 fmt1 : 4;
+ u32 cssid : 8;
+ u32 : 8;
+ u32 first_cun : 8;
+ u32 : 24;
+ u32 last_cun : 8;
+ u32 : 32;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *scucd_area;
+
+ scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scucd_area)
+ return -ENOMEM;
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ scucd_area->request.length = 0x0010;
+ scucd_area->request.code = 0x0028;
+ scucd_area->m = cd->m;
+ scucd_area->fmt1 = cd->fmt;
+ scucd_area->cssid = cd->cssid;
+ scucd_area->first_cun = cd->cun;
+ scucd_area->last_cun = cd->cun;
+
+ ccode = chsc(scucd_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (scucd_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "scucd: response code=%x\n",
+ scucd_area->response.code);
+ goto out_free;
+ }
+ memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
+ if (copy_to_user(user_cd, cd, sizeof(*cd)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(cd);
+ free_page((unsigned long)scucd_area);
+ return ret;
+}
+
+static int chsc_ioctl_info_sch_cu(void __user *user_cud)
+{
+ struct chsc_sch_cud *cud;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 5;
+ u32 fmt1 : 4;
+ u32 : 2;
+ u32 ssid : 2;
+ u32 first_sch : 16;
+ u32 : 8;
+ u32 cssid : 8;
+ u32 last_sch : 16;
+ u32 : 32;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *sscud_area;
+
+ sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sscud_area)
+ return -ENOMEM;
+ cud = kzalloc(sizeof(*cud), GFP_KERNEL);
+ if (!cud) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(cud, user_cud, sizeof(*cud))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sscud_area->request.length = 0x0010;
+ sscud_area->request.code = 0x0006;
+ sscud_area->m = cud->schid.m;
+ sscud_area->fmt1 = cud->fmt;
+ sscud_area->ssid = cud->schid.ssid;
+ sscud_area->first_sch = cud->schid.sch_no;
+ sscud_area->cssid = cud->schid.cssid;
+ sscud_area->last_sch = cud->schid.sch_no;
+
+ ccode = chsc(sscud_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sscud_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sscud: response code=%x\n",
+ sscud_area->response.code);
+ goto out_free;
+ }
+ memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
+ if (copy_to_user(user_cud, cud, sizeof(*cud)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(cud);
+ free_page((unsigned long)sscud_area);
+ return ret;
+}
+
+static int chsc_ioctl_conf_info(void __user *user_ci)
+{
+ struct chsc_conf_info *ci;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 1;
+ u32 fmt1 : 4;
+ u32 cssid : 8;
+ u32 : 6;
+ u32 ssid : 2;
+ u32 : 8;
+ u64 : 64;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *sci_area;
+
+ sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sci_area)
+ return -ENOMEM;
+ ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+ if (!ci) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(ci, user_ci, sizeof(*ci))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sci_area->request.length = 0x0010;
+ sci_area->request.code = 0x0012;
+ sci_area->m = ci->id.m;
+ sci_area->fmt1 = ci->fmt;
+ sci_area->cssid = ci->id.cssid;
+ sci_area->ssid = ci->id.ssid;
+
+ ccode = chsc(sci_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sci_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sci: response code=%x\n",
+ sci_area->response.code);
+ goto out_free;
+ }
+ memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
+ if (copy_to_user(user_ci, ci, sizeof(*ci)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(ci);
+ free_page((unsigned long)sci_area);
+ return ret;
+}
+
+static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
+{
+ struct chsc_comp_list *ccl;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 ctype : 8;
+ u32 : 4;
+ u32 fmt : 4;
+ u32 : 16;
+ u64 : 64;
+ u32 list_parm[2];
+ u64 : 64;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 36];
+ } __attribute__ ((packed)) *sccl_area;
+ struct {
+ u32 m : 1;
+ u32 : 31;
+ u32 cssid : 8;
+ u32 : 16;
+ u32 chpid : 8;
+ } __attribute__ ((packed)) *chpid_parm;
+ struct {
+ u32 f_cssid : 8;
+ u32 l_cssid : 8;
+ u32 : 16;
+ u32 res;
+ } __attribute__ ((packed)) *cssids_parm;
+
+ sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccl_area)
+ return -ENOMEM;
+ ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
+ if (!ccl) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sccl_area->request.length = 0x0020;
+ sccl_area->request.code = 0x0030;
+ sccl_area->fmt = ccl->req.fmt;
+ sccl_area->ctype = ccl->req.ctype;
+ switch (sccl_area->ctype) {
+ case CCL_CU_ON_CHP:
+ case CCL_IOP_CHP:
+ chpid_parm = (void *)&sccl_area->list_parm;
+ chpid_parm->m = ccl->req.chpid.m;
+ chpid_parm->cssid = ccl->req.chpid.chp.cssid;
+ chpid_parm->chpid = ccl->req.chpid.chp.id;
+ break;
+ case CCL_CSS_IMG:
+ case CCL_CSS_IMG_CONF_CHAR:
+ cssids_parm = (void *)&sccl_area->list_parm;
+ cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
+ cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
+ break;
+ }
+ ccode = chsc(sccl_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sccl_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sccl: response code=%x\n",
+ sccl_area->response.code);
+ goto out_free;
+ }
+ memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
+ if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(ccl);
+ free_page((unsigned long)sccl_area);
+ return ret;
+}
+
+static int chsc_ioctl_chpd(void __user *user_chpd)
+{
+ struct chsc_scpd *scpd_area;
+ struct chsc_cpd_info *chpd;
+ int ret;
+
+ chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scpd_area || !chpd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
+ chpd->rfmt, chpd->c, chpd->m,
+ scpd_area);
+ if (ret)
+ goto out_free;
+ memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
+ if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
+ ret = -EFAULT;
+out_free:
+ kfree(chpd);
+ free_page((unsigned long)scpd_area);
+ return ret;
+}
+
+static int chsc_ioctl_dcal(void __user *user_dcal)
+{
+ struct chsc_dcal *dcal;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 atype : 8;
+ u32 : 4;
+ u32 fmt : 4;
+ u32 : 16;
+ u32 res0[2];
+ u32 list_parm[2];
+ u32 res1[2];
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 36];
+ } __attribute__ ((packed)) *sdcal_area;
+
+ sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sdcal_area)
+ return -ENOMEM;
+ dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
+ if (!dcal) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sdcal_area->request.length = 0x0020;
+ sdcal_area->request.code = 0x0034;
+ sdcal_area->atype = dcal->req.atype;
+ sdcal_area->fmt = dcal->req.fmt;
+ memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
+ sizeof(sdcal_area->list_parm));
+
+ ccode = chsc(sdcal_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sdcal_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sdcal: response code=%x\n",
+ sdcal_area->response.code);
+ goto out_free;
+ }
+ memcpy(&dcal->sdcal, &sdcal_area->response,
+ sdcal_area->response.length);
+ if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(dcal);
+ free_page((unsigned long)sdcal_area);
+ return ret;
+}
+
+static long chsc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp;
+
+ CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
+ switch (cmd) {
+ case CHSC_START:
+ return chsc_ioctl_start(argp);
+ case CHSC_START_SYNC:
+ return chsc_ioctl_start_sync(argp);
+ case CHSC_INFO_CHANNEL_PATH:
+ return chsc_ioctl_info_channel_path(argp);
+ case CHSC_INFO_CU:
+ return chsc_ioctl_info_cu(argp);
+ case CHSC_INFO_SCH_CU:
+ return chsc_ioctl_info_sch_cu(argp);
+ case CHSC_INFO_CI:
+ return chsc_ioctl_conf_info(argp);
+ case CHSC_INFO_CCL:
+ return chsc_ioctl_conf_comp_list(argp);
+ case CHSC_INFO_CPD:
+ return chsc_ioctl_chpd(argp);
+ case CHSC_INFO_DCAL:
+ return chsc_ioctl_dcal(argp);
+ case CHSC_ON_CLOSE_SET:
+ return chsc_ioctl_on_close_set(argp);
+ case CHSC_ON_CLOSE_REMOVE:
+ return chsc_ioctl_on_close_remove();
+ default: /* unknown ioctl number */
+ return -ENOIOCTLCMD;
+ }
+}
+
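+/*
+ * Allow only one user of the chsc device at a time: the counter
+ * starts at 1, the first open decrements it to 0, and any further
+ * open fails with -EBUSY until release increments it again.
+ */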
+static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
+
+static int chsc_open(struct inode *inode, struct file *file)
+{
+ if (!atomic_dec_and_test(&chsc_ready_for_use)) {
+ atomic_inc(&chsc_ready_for_use);
+ return -EBUSY;
+ }
+ return nonseekable_open(inode, file);
+}
+
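+/*
+ * If an on-close CHSC command was registered, issue it now and wait
+ * for its completion before giving up exclusive access.
+ */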
+static int chsc_release(struct inode *inode, struct file *filp)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area)
+ goto out_unlock;
+ init_completion(&on_close_request->completion);
+ CHSC_LOG(0, "on_close");
+ chsc_log_command(on_close_chsc_area);
+ spin_lock_irq(&chsc_lock);
+ ret = chsc_async(on_close_chsc_area, on_close_request);
+ spin_unlock_irq(&chsc_lock);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&on_close_request->completion);
+ ret = chsc_examine_irb(on_close_request);
+ }
+ snprintf(dbf, sizeof(dbf), "relret:%d", ret);
+ CHSC_LOG(0, dbf);
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ atomic_inc(&chsc_ready_for_use);
+ return 0;
+}
+
+static const struct file_operations chsc_fops = {
+ .owner = THIS_MODULE,
+ .open = chsc_open,
+ .release = chsc_release,
+ .unlocked_ioctl = chsc_ioctl,
+ .compat_ioctl = chsc_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice chsc_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "chsc",
+ .fops = &chsc_fops,
+};
+
+static int __init chsc_misc_init(void)
+{
+ return misc_register(&chsc_misc_device);
+}
+
+static void chsc_misc_cleanup(void)
+{
+ misc_deregister(&chsc_misc_device);
+}
+
+static int __init chsc_sch_init(void)
+{
+ int ret;
+
+ ret = chsc_init_dbfs();
+ if (ret)
+ return ret;
+ isc_register(CHSC_SCH_ISC);
+ ret = chsc_init_sch_driver();
+ if (ret)
+ goto out_dbf;
+ ret = chsc_misc_init();
+ if (ret)
+ goto out_driver;
+ return ret;
+out_driver:
+ chsc_cleanup_sch_driver();
+out_dbf:
+ isc_unregister(CHSC_SCH_ISC);
+ chsc_remove_dbfs();
+ return ret;
+}
+
+static void __exit chsc_sch_exit(void)
+{
+ chsc_misc_cleanup();
+ chsc_cleanup_sch_driver();
+ isc_unregister(CHSC_SCH_ISC);
+ chsc_remove_dbfs();
+}
+
+module_init(chsc_sch_init);
+module_exit(chsc_sch_exit);
diff --git a/kernel/drivers/s390/cio/chsc_sch.h b/kernel/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 000000000..589ebfad6
--- /dev/null
+++ b/kernel/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,13 @@
+#ifndef _CHSC_SCH_H
+#define _CHSC_SCH_H
+
+struct chsc_request {
+ struct completion completion;
+ struct irb irb;
+};
+
+struct chsc_private {
+ struct chsc_request *request;
+};
+
+#endif
diff --git a/kernel/drivers/s390/cio/cio.c b/kernel/drivers/s390/cio/cio.c
new file mode 100644
index 000000000..07fc5d9e7
--- /dev/null
+++ b/kernel/drivers/s390/cio/cio.c
@@ -0,0 +1,1020 @@
+/*
+ * S/390 common I/O routines -- low level i/o calls
+ *
+ * Copyright IBM Corp. 1999, 2008
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/cio.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/setup.h>
+#include <asm/reset.h>
+#include <asm/ipl.h>
+#include <asm/chpid.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+#include <linux/cputime.h>
+#include <asm/fcx.h>
+#include <asm/nmi.h>
+#include <asm/crw.h>
+#include "cio.h"
+#include "css.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+debug_info_t *cio_debug_msg_id;
+debug_info_t *cio_debug_trace_id;
+debug_info_t *cio_debug_crw_id;
+
+DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb);
+EXPORT_PER_CPU_SYMBOL(cio_irb);
+
+/*
+ * Function: cio_debug_init
+ * Initializes three debug logs for common I/O:
+ * - cio_msg logs generic cio messages
+ * - cio_trace logs the calling of different functions
+ * - cio_crw logs machine check related cio messages
+ */
+static int __init cio_debug_init(void)
+{
+ cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
+ if (!cio_debug_msg_id)
+ goto out_unregister;
+ debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(cio_debug_msg_id, 2);
+ cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
+ if (!cio_debug_trace_id)
+ goto out_unregister;
+ debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
+ debug_set_level(cio_debug_trace_id, 2);
+ cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
+ if (!cio_debug_crw_id)
+ goto out_unregister;
+ debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
+ debug_set_level(cio_debug_crw_id, 4);
+ return 0;
+
+out_unregister:
+ if (cio_debug_msg_id)
+ debug_unregister(cio_debug_msg_id);
+ if (cio_debug_trace_id)
+ debug_unregister(cio_debug_trace_id);
+ if (cio_debug_crw_id)
+ debug_unregister(cio_debug_crw_id);
+ return -1;
+}
+
+arch_initcall (cio_debug_init);
+
+int cio_set_options(struct subchannel *sch, int flags)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+
+ priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+ priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+ priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+ return 0;
+}
+
+static int
+cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
+{
+ char dbf_text[15];
+
+ if (lpm != 0)
+ sch->lpm &= ~lpm;
+ else
+ sch->lpm = 0;
+
+ CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
+ "subchannel 0.%x.%04x!\n", sch->schid.ssid,
+ sch->schid.sch_no);
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ sprintf(dbf_text, "no%s", dev_name(&sch->dev));
+ CIO_TRACE_EVENT(0, dbf_text);
+ CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
+
+ return (sch->lpm ? -EACCES : -ENODEV);
+}
+
+int
+cio_start_key (struct subchannel *sch, /* subchannel structure */
+ struct ccw1 * cpa, /* logical channel prog addr */
+ __u8 lpm, /* logical path mask */
+ __u8 key) /* storage key */
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ union orb *orb = &priv->orb;
+ int ccode;
+
+ CIO_TRACE_EVENT(5, "stIO");
+ CIO_TRACE_EVENT(5, dev_name(&sch->dev));
+
+ memset(orb, 0, sizeof(union orb));
+ /* sch is always under 2G. */
+ orb->cmd.intparm = (u32)(addr_t)sch;
+ orb->cmd.fmt = 1;
+
+ orb->cmd.pfch = priv->options.prefetch == 0;
+ orb->cmd.spnd = priv->options.suspend;
+ orb->cmd.ssic = priv->options.suspend && priv->options.inter;
+ orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
+ /*
+ * for 64 bit we always support 64 bit IDAWs with 4k page size only
+ */
+ orb->cmd.c64 = 1;
+ orb->cmd.i2k = 0;
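+ /* the access key occupies the upper four bits of the key byte */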
+ orb->cmd.key = key >> 4;
+ /* issue "Start Subchannel" */
+ orb->cmd.cpa = (__u32) __pa(cpa);
+ ccode = ssch(sch->schid, orb);
+
+ /* process condition code */
+ CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ /*
+ * initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+ return 0;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ case 3: /* device/path not operational */
+ return cio_start_handle_notoper(sch, lpm);
+ default:
+ return ccode;
+ }
+}
+
+int
+cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
+{
+ return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
+}
+
+/*
+ * resume suspended I/O operation
+ */
+int
+cio_resume (struct subchannel *sch)
+{
+ int ccode;
+
+ CIO_TRACE_EVENT(4, "resIO");
+ CIO_TRACE_EVENT(4, dev_name(&sch->dev));
+
+ ccode = rsch (sch->schid);
+
+ CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
+ return 0;
+ case 1:
+ return -EBUSY;
+ case 2:
+ return -EINVAL;
+ default:
+ /*
+ * useless to wait for request completion
+ * as device is no longer operational !
+ */
+ return -ENODEV;
+ }
+}
+
+/*
+ * halt I/O operation
+ */
+int
+cio_halt(struct subchannel *sch)
+{
+ int ccode;
+
+ if (!sch)
+ return -ENODEV;
+
+ CIO_TRACE_EVENT(2, "haltIO");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ /*
+ * Issue "Halt subchannel" and process condition code
+ */
+ ccode = hsch (sch->schid);
+
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+ return 0;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ default: /* device not operational */
+ return -ENODEV;
+ }
+}
+
+/*
+ * Clear I/O operation
+ */
+int
+cio_clear(struct subchannel *sch)
+{
+ int ccode;
+
+ if (!sch)
+ return -ENODEV;
+
+ CIO_TRACE_EVENT(2, "clearIO");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ /*
+ * Issue "Clear subchannel" and process condition code
+ */
+ ccode = csch (sch->schid);
+
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
+ return 0;
+ default: /* device not operational */
+ return -ENODEV;
+ }
+}
+
+/*
+ * Function: cio_cancel
+ * Issues a "Cancel Subchannel" on the specified subchannel
+ * Note: We don't need any fancy intparms and flags here
+ * since xsch is executed synchronously.
+ * Only for common I/O internal use as for now.
+ */
+int
+cio_cancel (struct subchannel *sch)
+{
+ int ccode;
+
+ if (!sch)
+ return -ENODEV;
+
+ CIO_TRACE_EVENT(2, "cancelIO");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ ccode = xsch (sch->schid);
+
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0: /* success */
+ /* Update information in scsw. */
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ return 0;
+ case 1: /* status pending */
+ return -EBUSY;
+ case 2: /* not applicable */
+ return -EINVAL;
+ default: /* not oper */
+ return -ENODEV;
+ }
+}
+
+
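+/* Copy the desired subchannel configuration into a local schib. */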
+static void cio_apply_config(struct subchannel *sch, struct schib *schib)
+{
+ schib->pmcw.intparm = sch->config.intparm;
+ schib->pmcw.mbi = sch->config.mbi;
+ schib->pmcw.isc = sch->config.isc;
+ schib->pmcw.ena = sch->config.ena;
+ schib->pmcw.mme = sch->config.mme;
+ schib->pmcw.mp = sch->config.mp;
+ schib->pmcw.csense = sch->config.csense;
+ schib->pmcw.mbfc = sch->config.mbfc;
+ if (sch->config.mbfc)
+ schib->mba = sch->config.mba;
+}
+
+static int cio_check_config(struct subchannel *sch, struct schib *schib)
+{
+ return (schib->pmcw.intparm == sch->config.intparm) &&
+ (schib->pmcw.mbi == sch->config.mbi) &&
+ (schib->pmcw.isc == sch->config.isc) &&
+ (schib->pmcw.ena == sch->config.ena) &&
+ (schib->pmcw.mme == sch->config.mme) &&
+ (schib->pmcw.mp == sch->config.mp) &&
+ (schib->pmcw.csense == sch->config.csense) &&
+ (schib->pmcw.mbfc == sch->config.mbfc) &&
+ (!sch->config.mbfc || (schib->mba == sch->config.mba));
+}
+
+/*
+ * cio_commit_config - apply configuration to the subchannel
+ */
+int cio_commit_config(struct subchannel *sch)
+{
+ int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
+
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ return -ENODEV;
+
+ for (retry = 0; retry < 5; retry++) {
+ /* copy desired changes to local schib */
+ cio_apply_config(sch, &schib);
+ ccode = msch_err(sch->schid, &schib);
+ if (ccode < 0) /* -EIO if msch gets a program check. */
+ return ccode;
+ switch (ccode) {
+ case 0: /* successful */
+ if (stsch_err(sch->schid, &schib) ||
+ !css_sch_is_valid(&schib))
+ return -ENODEV;
+ if (cio_check_config(sch, &schib)) {
+ /* commit changes from local schib */
+ memcpy(&sch->schib, &schib, sizeof(schib));
+ return 0;
+ }
+ ret = -EAGAIN;
+ break;
+ case 1: /* status pending */
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
+ case 2: /* busy */
+ udelay(100); /* allow for recovery */
+ ret = -EBUSY;
+ break;
+ case 3: /* not operational */
+ return -ENODEV;
+ }
+ }
+ return ret;
+}
+
+/**
+ * cio_update_schib - Perform stsch and update schib if subchannel is valid.
+ * @sch: subchannel on which to perform stsch
+ * Return zero on success, -ENODEV otherwise.
+ */
+int cio_update_schib(struct subchannel *sch)
+{
+ struct schib schib;
+
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ return -ENODEV;
+
+ memcpy(&sch->schib, &schib, sizeof(schib));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cio_update_schib);
+
+/**
+ * cio_enable_subchannel - enable a subchannel.
+ * @sch: subchannel to be enabled
+ * @intparm: interruption parameter to set
+ */
+int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
+{
+ int ret;
+
+ CIO_TRACE_EVENT(2, "ensch");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ if (sch_is_pseudo_sch(sch))
+ return -EINVAL;
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ sch->config.ena = 1;
+ sch->config.isc = sch->isc;
+ sch->config.intparm = intparm;
+
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
+ ret = cio_commit_config(sch);
+ }
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cio_enable_subchannel);
+
+/**
+ * cio_disable_subchannel - disable a subchannel.
+ * @sch: subchannel to disable
+ */
+int cio_disable_subchannel(struct subchannel *sch)
+{
+ int ret;
+
+ CIO_TRACE_EVENT(2, "dissch");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ if (sch_is_pseudo_sch(sch))
+ return 0;
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ sch->config.ena = 0;
+ ret = cio_commit_config(sch);
+
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cio_disable_subchannel);
+
+static int cio_check_devno_blacklisted(struct subchannel *sch)
+{
+ if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
+ /*
+ * This device must not be known to Linux. So we simply
+ * say that there is no device and return ENODEV.
+ */
+ CIO_MSG_EVENT(6, "Blacklisted device detected "
+ "at devno %04X, subchannel set %x\n",
+ sch->schib.pmcw.dev, sch->schid.ssid);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int cio_validate_io_subchannel(struct subchannel *sch)
+{
+ /* Initialization for io subchannels. */
+ if (!css_sch_is_valid(&sch->schib))
+ return -ENODEV;
+
+ /* Devno is valid. */
+ return cio_check_devno_blacklisted(sch);
+}
+
+static int cio_validate_msg_subchannel(struct subchannel *sch)
+{
+ /* Initialization for message subchannels. */
+ if (!css_sch_is_valid(&sch->schib))
+ return -ENODEV;
+
+ /* Devno is valid. */
+ return cio_check_devno_blacklisted(sch);
+}
+
+/**
+ * cio_validate_subchannel - basic validation of subchannel
+ * @sch: subchannel structure to be filled out
+ * @schid: subchannel id
+ *
+ * Find out subchannel type and initialize struct subchannel.
+ * Return codes:
+ * 0 on success
+ * -ENXIO for non-defined subchannels
+ * -ENODEV for invalid subchannels or blacklisted devices
+ * -EIO for subchannels in an invalid subchannel set
+ */
+int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
+{
+ char dbf_txt[15];
+ int ccode;
+ int err;
+
+ sprintf(dbf_txt, "valsch%x", schid.sch_no);
+ CIO_TRACE_EVENT(4, dbf_txt);
+
+ /*
+ * The first subchannel that is not-operational (ccode==3)
+ * indicates that there aren't any more devices available.
+ * If stsch gets an exception, it means the current subchannel set
+ * is not valid.
+ */
+ ccode = stsch_err(schid, &sch->schib);
+ if (ccode) {
+ err = (ccode == 3) ? -ENXIO : ccode;
+ goto out;
+ }
+ sch->st = sch->schib.pmcw.st;
+ sch->schid = schid;
+
+ switch (sch->st) {
+ case SUBCHANNEL_TYPE_IO:
+ err = cio_validate_io_subchannel(sch);
+ break;
+ case SUBCHANNEL_TYPE_MSG:
+ err = cio_validate_msg_subchannel(sch);
+ break;
+ default:
+ err = 0;
+ }
+ if (err)
+ goto out;
+
+ CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+ sch->schid.ssid, sch->schid.sch_no, sch->st);
+out:
+ return err;
+}
+
+/*
+ * do_cio_interrupt() handles all normal I/O device IRQ's
+ */
+static irqreturn_t do_cio_interrupt(int irq, void *dummy)
+{
+ struct tpi_info *tpi_info;
+ struct subchannel *sch;
+ struct irb *irb;
+
+ set_cpu_flag(CIF_NOHZ_DELAY);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ irb = this_cpu_ptr(&cio_irb);
+ sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
+ if (!sch) {
+ /* Clear pending interrupt condition. */
+ inc_irq_stat(IRQIO_CIO);
+ tsch(tpi_info->schid, irb);
+ return IRQ_HANDLED;
+ }
+ spin_lock(sch->lock);
+ /* Store interrupt response block to lowcore. */
+ if (tsch(tpi_info->schid, irb) == 0) {
+ /* Keep subchannel information word up to date. */
+ memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
+ /* Call interrupt handler if there is one. */
+ if (sch->driver && sch->driver->irq)
+ sch->driver->irq(sch);
+ else
+ inc_irq_stat(IRQIO_CIO);
+ } else
+ inc_irq_stat(IRQIO_CIO);
+ spin_unlock(sch->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction io_interrupt = {
+ .name = "IO",
+ .handler = do_cio_interrupt,
+};
+
+void __init init_cio_interrupts(void)
+{
+ irq_set_chip_and_handler(IO_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(IO_INTERRUPT, &io_interrupt);
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
+
+/*
+ * Use cio_tsch to update the subchannel status and call the interrupt handler
+ * if status had been pending. Called with the subchannel's lock held.
+ */
+void cio_tsch(struct subchannel *sch)
+{
+ struct irb *irb;
+ int irq_context;
+
+ irb = this_cpu_ptr(&cio_irb);
+ /* Store interrupt response block to lowcore. */
+ if (tsch(sch->schid, irb) != 0)
+ /* Not status pending or not operational. */
+ return;
+ memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+ /* Call interrupt handler with updated status. */
+ irq_context = in_interrupt();
+ if (!irq_context) {
+ local_bh_disable();
+ irq_enter();
+ }
+ kstat_incr_irq_this_cpu(IO_INTERRUPT);
+ if (sch->driver && sch->driver->irq)
+ sch->driver->irq(sch);
+ else
+ inc_irq_stat(IRQIO_CIO);
+ if (!irq_context) {
+ irq_exit();
+ _local_bh_enable();
+ }
+}
+
+static int cio_test_for_console(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+
+ if (stsch_err(schid, &schib) != 0)
+ return -ENXIO;
+ if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ (schib.pmcw.dev == console_devno)) {
+ console_irq = schid.sch_no;
+ return 1; /* found */
+ }
+ return 0;
+}
+
+static int cio_get_console_sch_no(void)
+{
+ struct subchannel_id schid;
+ struct schib schib;
+
+ init_subchannel_id(&schid);
+ if (console_irq != -1) {
+ /* VM provided us with the irq number of the console. */
+ schid.sch_no = console_irq;
+ if (stsch_err(schid, &schib) != 0 ||
+ (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
+ return -1;
+ console_devno = schib.pmcw.dev;
+ } else if (console_devno != -1) {
+ /* At least the console device number is known. */
+ for_each_subchannel(cio_test_for_console, NULL);
+ }
+ return console_irq;
+}
+
+struct subchannel *cio_probe_console(void)
+{
+ struct subchannel_id schid;
+ struct subchannel *sch;
+ int sch_no, ret;
+
+ sch_no = cio_get_console_sch_no();
+ if (sch_no == -1) {
+ pr_warning("No CCW console was found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ init_subchannel_id(&schid);
+ schid.sch_no = sch_no;
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return sch;
+
+ lockdep_set_class(sch->lock, &console_sch_key);
+ isc_register(CONSOLE_ISC);
+ sch->config.isc = CONSOLE_ISC;
+ sch->config.intparm = (u32)(addr_t)sch;
+ ret = cio_commit_config(sch);
+ if (ret) {
+ isc_unregister(CONSOLE_ISC);
+ put_device(&sch->dev);
+ return ERR_PTR(ret);
+ }
+ console_sch = sch;
+ return sch;
+}
+
+int cio_is_console(struct subchannel_id schid)
+{
+ if (!console_sch)
+ return 0;
+ return schid_equal(&schid, &console_sch->schid);
+}
+
+void cio_register_early_subchannels(void)
+{
+ int ret;
+
+ if (!console_sch)
+ return;
+
+ ret = css_register_subchannel(console_sch);
+ if (ret)
+ put_device(&console_sch->dev);
+}
+#endif /* CONFIG_CCW_CONSOLE */
+
+static int
+__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
+{
+ int retry, cc;
+
+ cc = 0;
+ for (retry = 0; retry < 3; retry++) {
+ schib->pmcw.ena = 0;
+ cc = msch_err(schid, schib);
+ if (cc)
+ return (cc == 3) ? -ENODEV : -EBUSY;
+ if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
+ return -ENODEV;
+ if (!schib->pmcw.ena)
+ return 0;
+ }
+ return -EBUSY; /* subchannel would not disable */
+}
+
+static int
+__clear_io_subchannel_easy(struct subchannel_id schid)
+{
+ int retry;
+
+ if (csch(schid))
+ return -ENODEV;
+ for (retry = 0; retry < 20; retry++) {
+ struct tpi_info ti;
+
+ if (tpi(&ti)) {
+ tsch(ti.schid, this_cpu_ptr(&cio_irb));
+ if (schid_equal(&ti.schid, &schid))
+ return 0;
+ }
+ udelay_simple(100);
+ }
+ return -EBUSY;
+}
+
+static void __clear_chsc_subchannel_easy(void)
+{
+ /* It seems we can only wait for a bit here :/ */
+ udelay_simple(100);
+}
+
+static int pgm_check_occurred;
+
+static void cio_reset_pgm_check_handler(void)
+{
+ pgm_check_occurred = 1;
+}
+
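+/*
+ * Perform stsch with a temporary program check handler installed,
+ * so that an exception during the store (e.g. for an invalid
+ * subchannel set) is turned into -EIO instead of crashing the
+ * reset path.
+ */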
+static int stsch_reset(struct subchannel_id schid, struct schib *addr)
+{
+ int rc;
+
+ pgm_check_occurred = 0;
+ s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
+ rc = stsch_err(schid, addr);
+ s390_base_pgm_handler_fn = NULL;
+
+ /* The program check handler could have changed pgm_check_occurred. */
+ barrier();
+
+ if (pgm_check_occurred)
+ return -EIO;
+ else
+ return rc;
+}
+
+static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+
+ if (stsch_reset(schid, &schib))
+ return -ENXIO;
+ if (!schib.pmcw.ena)
+ return 0;
+ switch(__disable_subchannel_easy(schid, &schib)) {
+ case 0:
+ case -ENODEV:
+ break;
+ default: /* -EBUSY */
+ switch (schib.pmcw.st) {
+ case SUBCHANNEL_TYPE_IO:
+ if (__clear_io_subchannel_easy(schid))
+ goto out; /* give up... */
+ break;
+ case SUBCHANNEL_TYPE_CHSC:
+ __clear_chsc_subchannel_easy();
+ break;
+ default:
+ /* No default clear strategy */
+ break;
+ }
+ stsch_err(schid, &schib);
+ __disable_subchannel_easy(schid, &schib);
+ }
+out:
+ return 0;
+}
+
+static atomic_t chpid_reset_count;
+
+static void s390_reset_chpids_mcck_handler(void)
+{
+ struct crw crw;
+ struct mci *mci;
+
+ /* Check for pending channel report word. */
+ mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
+ if (!mci->cp)
+ return;
+ /* Process channel report words. */
+ while (stcrw(&crw) == 0) {
+ /* Check for responses to RCHP. */
+ if (crw.slct && crw.rsc == CRW_RSC_CPATH)
+ atomic_dec(&chpid_reset_count);
+ }
+}
+
+#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
+static void css_reset(void)
+{
+ int i, ret;
+ unsigned long long timeout;
+ struct chp_id chpid;
+
+ /* Reset subchannels. */
+ for_each_subchannel(__shutdown_subchannel_easy, NULL);
+ /* Reset channel paths. */
+ s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
+ /* Enable channel report machine checks. */
+ __ctl_set_bit(14, 28);
+ /* Temporarily reenable machine checks. */
+ local_mcck_enable();
+ chp_id_init(&chpid);
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ chpid.id = i;
+ ret = rchp(chpid);
+ if ((ret == 0) || (ret == 2))
+ /*
+ * rchp either succeeded, or another rchp is already
+ * in progress. In either case, we'll get a crw.
+ */
+ atomic_inc(&chpid_reset_count);
+ }
+ /* Wait for machine check for all channel paths. */
+ timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
+ while (atomic_read(&chpid_reset_count) != 0) {
+ if (get_tod_clock_fast() > timeout)
+ break;
+ cpu_relax();
+ }
+ /* Disable machine checks again. */
+ local_mcck_disable();
+ /* Disable channel report machine checks. */
+ __ctl_clear_bit(14, 28);
+ s390_base_mcck_handler_fn = NULL;
+}
+
+static struct reset_call css_reset_call = {
+ .fn = css_reset,
+};
+
+static int __init init_css_reset_call(void)
+{
+ atomic_set(&chpid_reset_count, 0);
+ register_reset_call(&css_reset_call);
+ return 0;
+}
+
+arch_initcall(init_css_reset_call);
+
+struct sch_match_id {
+ struct subchannel_id schid;
+ struct ccw_dev_id devid;
+ int rc;
+};
+
+static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+ struct sch_match_id *match_id = data;
+
+ if (stsch_reset(schid, &schib))
+ return -ENXIO;
+ if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ (schib.pmcw.dev == match_id->devid.devno) &&
+ (schid.ssid == match_id->devid.ssid)) {
+ match_id->schid = schid;
+ match_id->rc = 0;
+ return 1;
+ }
+ return 0;
+}
+
+static int reipl_find_schid(struct ccw_dev_id *devid,
+ struct subchannel_id *schid)
+{
+ struct sch_match_id match_id;
+
+ match_id.devid = *devid;
+ match_id.rc = -ENODEV;
+ for_each_subchannel(__reipl_subchannel_match, &match_id);
+ if (match_id.rc == 0)
+ *schid = match_id.schid;
+ return match_id.rc;
+}
+
+extern void do_reipl_asm(__u32 schid);
+
+/* Make sure all subchannels are quiet before we re-ipl an lpar. */
+void reipl_ccw_dev(struct ccw_dev_id *devid)
+{
+ struct subchannel_id uninitialized_var(schid);
+
+ s390_reset_system(NULL, NULL, NULL);
+ if (reipl_find_schid(devid, &schid) != 0)
+ panic("IPL Device not found\n");
+ do_reipl_asm(*((__u32*)&schid));
+}
+
+int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
+{
+ struct subchannel_id schid;
+ struct schib schib;
+
+ schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
+ if (!schid.one)
+ return -ENODEV;
+ if (stsch_err(schid, &schib))
+ return -ENODEV;
+ if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
+ return -ENODEV;
+ if (!schib.pmcw.dnv)
+ return -ENODEV;
+ iplinfo->devno = schib.pmcw.dev;
+ iplinfo->is_qdio = schib.pmcw.qf;
+ return 0;
+}
+
+/**
+ * cio_tm_start_key - perform start function
+ * @sch: subchannel on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given subchannel. Return zero on success, non-zero
+ * otherwise.
+ */
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
+{
+ int cc;
+ union orb *orb = &to_io_private(sch)->orb;
+
+ memset(orb, 0, sizeof(union orb));
+ orb->tm.intparm = (u32) (addr_t) sch;
+ orb->tm.key = key >> 4;
+ orb->tm.b = 1;
+ orb->tm.lpm = lpm ? lpm : sch->lpm;
+ orb->tm.tcw = (u32) (addr_t) tcw;
+ cc = ssch(sch->schid, orb);
+ switch (cc) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ return -EBUSY;
+ default:
+ return cio_start_handle_notoper(sch, lpm);
+ }
+}
+
+/**
+ * cio_tm_intrg - perform interrogate function
+ * @sch: subchannel on which to perform the interrogate function
+ *
+ * If the specified subchannel is running in transport-mode, perform the
+ * interrogate function. Return zero on success, non-zero otherwise.
+ */
+int cio_tm_intrg(struct subchannel *sch)
+{
+ int cc;
+
+ if (!to_io_private(sch)->orb.tm.b)
+ return -EINVAL;
+ cc = xsch(sch->schid);
+ switch (cc) {
+ case 0:
+ case 2:
+ return 0;
+ case 1:
+ return -EBUSY;
+ default:
+ return -ENODEV;
+ }
+}
diff --git a/kernel/drivers/s390/cio/cio.h b/kernel/drivers/s390/cio/cio.h
new file mode 100644
index 000000000..a01376ae1
--- /dev/null
+++ b/kernel/drivers/s390/cio/cio.h
@@ -0,0 +1,137 @@
+#ifndef S390_CIO_H
+#define S390_CIO_H
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <asm/chpid.h>
+#include <asm/cio.h>
+#include <asm/fcx.h>
+#include <asm/schid.h>
+#include "chsc.h"
+
+/*
+ * path management control word
+ */
+struct pmcw {
+ u32 intparm; /* interruption parameter */
+ u32 qf : 1; /* qdio facility */
+ u32 w : 1;
+ u32 isc : 3; /* interruption subclass */
+ u32 res5 : 3; /* reserved zeros */
+ u32 ena : 1; /* enabled */
+ u32 lm : 2; /* limit mode */
+ u32 mme : 2; /* measurement-mode enable */
+ u32 mp : 1; /* multipath mode */
+ u32 tf : 1; /* timing facility */
+ u32 dnv : 1; /* device number valid */
+ u32 dev : 16; /* device number */
+ u8 lpm; /* logical path mask */
+ u8 pnom; /* path not operational mask */
+ u8 lpum; /* last path used mask */
+ u8 pim; /* path installed mask */
+ u16 mbi; /* measurement-block index */
+ u8 pom; /* path operational mask */
+ u8 pam; /* path available mask */
+ u8 chpid[8]; /* CHPID 0-7 (if available) */
+ u32 unused1 : 8; /* reserved zeros */
+ u32 st : 3; /* subchannel type */
+ u32 unused2 : 18; /* reserved zeros */
+ u32 mbfc : 1; /* measurement block format control */
+ u32 xmwme : 1; /* extended measurement word mode enable */
+ u32 csense : 1; /* concurrent sense; can be enabled ...*/
+ /* ... per MSCH, however, if facility */
+ /* ... is not installed, this results */
+ /* ... in an operand exception. */
+} __attribute__ ((packed));
+
+/* Target SCHIB configuration. */
+struct schib_config {
+ u64 mba;
+ u32 intparm;
+ u16 mbi;
+ u32 isc:3;
+ u32 ena:1;
+ u32 mme:2;
+ u32 mp:1;
+ u32 csense:1;
+ u32 mbfc:1;
+} __attribute__ ((packed));
+
+/*
+ * subchannel information block
+ */
+struct schib {
+ struct pmcw pmcw; /* path management control word */
+ union scsw scsw; /* subchannel status word */
+ __u64 mba; /* measurement block address */
+ __u8 mda[4]; /* model dependent area */
+} __attribute__ ((packed,aligned(4)));
+
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
+enum sch_todo {
+ SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
+ SCH_TODO_UNREG,
+};
+
+/* subchannel data structure used by I/O subroutines */
+struct subchannel {
+ struct subchannel_id schid;
+ spinlock_t *lock; /* subchannel lock */
+ struct mutex reg_mutex;
+ enum {
+ SUBCHANNEL_TYPE_IO = 0,
+ SUBCHANNEL_TYPE_CHSC = 1,
+ SUBCHANNEL_TYPE_MSG = 2,
+ SUBCHANNEL_TYPE_ADM = 3,
+ } st; /* subchannel type */
+ __u8 vpm; /* verified path mask */
+ __u8 lpm; /* logical path mask */
+ __u8 opm; /* operational path mask */
+ struct schib schib; /* subchannel information block */
+ int isc; /* desired interruption subclass */
+ struct chsc_ssd_info ssd_info; /* subchannel description */
+ struct device dev; /* entry in device tree */
+ struct css_driver *driver;
+ enum sch_todo todo;
+ struct work_struct todo_work;
+ struct schib_config config;
+} __attribute__ ((aligned(8)));
+
+DECLARE_PER_CPU(struct irb, cio_irb);
+
+#define to_subchannel(n) container_of(n, struct subchannel, dev)
+
+extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
+extern int cio_enable_subchannel(struct subchannel *, u32);
+extern int cio_disable_subchannel (struct subchannel *);
+extern int cio_cancel (struct subchannel *);
+extern int cio_clear (struct subchannel *);
+extern int cio_resume (struct subchannel *);
+extern int cio_halt (struct subchannel *);
+extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
+extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
+extern int cio_cancel (struct subchannel *);
+extern int cio_set_options (struct subchannel *, int);
+extern int cio_update_schib(struct subchannel *sch);
+extern int cio_commit_config(struct subchannel *sch);
+
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
+int cio_tm_intrg(struct subchannel *sch);
+
+/* Use with care. */
+#ifdef CONFIG_CCW_CONSOLE
+extern struct subchannel *cio_probe_console(void);
+extern int cio_is_console(struct subchannel_id);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
+#else
+#define cio_is_console(schid) 0
+static inline void cio_register_early_subchannels(void) {}
+#endif
+
+#endif
diff --git a/kernel/drivers/s390/cio/cio_debug.h b/kernel/drivers/s390/cio/cio_debug.h
new file mode 100644
index 000000000..e64e8278c
--- /dev/null
+++ b/kernel/drivers/s390/cio/cio_debug.h
@@ -0,0 +1,34 @@
+#ifndef CIO_DEBUG_H
+#define CIO_DEBUG_H
+
+#include <asm/debug.h>
+
+/* for use of debug feature */
+extern debug_info_t *cio_debug_msg_id;
+extern debug_info_t *cio_debug_trace_id;
+extern debug_info_t *cio_debug_crw_id;
+
+#define CIO_TRACE_EVENT(imp, txt) do { \
+ debug_text_event(cio_debug_trace_id, imp, txt); \
+ } while (0)
+
+#define CIO_MSG_EVENT(imp, args...) do { \
+ debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
+ } while (0)
+
+#define CIO_CRW_EVENT(imp, args...) do { \
+ debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
+ } while (0)
+
+static inline void CIO_HEX_EVENT(int level, void *data, int length)
+{
+ if (unlikely(!cio_debug_trace_id))
+ return;
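+ /* Split buffers that exceed the entry size across debug events. */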
+ while (length > 0) {
+ debug_event(cio_debug_trace_id, level, data, length);
+ length -= cio_debug_trace_id->buf_size;
+ data += cio_debug_trace_id->buf_size;
+ }
+}
+
+#endif
diff --git a/kernel/drivers/s390/cio/cmf.c b/kernel/drivers/s390/cio/cmf.c
new file mode 100644
index 000000000..23054f8fa
--- /dev/null
+++ b/kernel/drivers/s390/cio/cmf.c
@@ -0,0 +1,1347 @@
+/*
+ * Linux on zSeries Channel Measurement Facility support
+ *
+ * Copyright IBM Corp. 2000, 2006
+ *
+ * Authors: Arnd Bergmann <arndb@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/bootmem.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/timex.h> /* get_tod_clock() */
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cmb.h>
+#include <asm/div64.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+/*
+ * parameter to enable cmf during boot, possible uses are:
+ * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
+ * used on any subchannel
+ * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
+ * <num> subchannels, where <num> is an integer
+ * between 1 and 65535, default is 1024
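+ * e.g. booting with "s390cmf=2048" reserves measurement blocks
+ * for 2048 subchannels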
+ */
+#define ARGSTRING "s390cmf"
+
+/* indices for READCMB */
+enum cmb_index {
+ /* basic and extended format: */
+ cmb_ssch_rsch_count,
+ cmb_sample_count,
+ cmb_device_connect_time,
+ cmb_function_pending_time,
+ cmb_device_disconnect_time,
+ cmb_control_unit_queuing_time,
+ cmb_device_active_only_time,
+ /* extended format only: */
+ cmb_device_busy_time,
+ cmb_initial_command_response_time,
+};
+
+/**
+ * enum cmb_format - types of supported measurement block formats
+ *
+ * @CMF_BASIC: traditional channel measurement blocks supported
+ * by all machines that we run on
+ * @CMF_EXTENDED: improved format that was introduced with the z990
+ * machine
+ * @CMF_AUTODETECT: default: use extended format when running on a machine
+ * supporting extended format, otherwise fall back to
+ * basic format
+ */
+enum cmb_format {
+ CMF_BASIC,
+ CMF_EXTENDED,
+ CMF_AUTODETECT = -1,
+};
+
+/*
+ * format - actual format for all measurement blocks
+ *
+ * The format module parameter can be set to a value of 0 (zero)
+ * or 1, indicating basic or extended format as described for
+ * enum cmb_format.
+ */
+static int format = CMF_AUTODETECT;
+module_param(format, bint, 0444);
+
+/**
+ * struct cmb_operations - functions to use depending on cmb_format
+ *
+ * Most of these functions operate on a struct ccw_device. There is only
+ * one instance of struct cmb_operations because the format of the measurement
+ * data is guaranteed to be the same for every ccw_device.
+ *
+ * @alloc: allocate memory for a channel measurement block,
+ * either with the help of a special pool or with kmalloc
+ * @free: free memory allocated with @alloc
+ * @set: enable or disable measurement
+ * @read: read a measurement entry at an index
+ * @readall: read a measurement block in a common format
+ * @reset: clear the data in the associated measurement block and
+ * reset its time stamp
+ * @align: align an allocated block so that the hardware can use it
+ */
+struct cmb_operations {
+ int (*alloc) (struct ccw_device *);
+ void (*free) (struct ccw_device *);
+ int (*set) (struct ccw_device *, u32);
+ u64 (*read) (struct ccw_device *, int);
+ int (*readall)(struct ccw_device *, struct cmbdata *);
+ void (*reset) (struct ccw_device *);
+ void *(*align) (void *);
+/* private: */
+ struct attribute_group *attr_group;
+};
+static struct cmb_operations *cmbops;
+
+struct cmb_data {
+ void *hw_block; /* Pointer to block updated by hardware */
+ void *last_block; /* Last changed block copied from hardware block */
+ int size; /* Size of hw_block and last_block */
+ unsigned long long last_update; /* when last_block was updated */
+};
+
+/*
+ * Our user interface is designed in terms of nanoseconds,
+ * while the hardware measures total times in its own
+ * unit.
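+ * One hardware unit corresponds to 128 microseconds, hence the
+ * factor of 128000 nanoseconds used below.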
+ */
+static inline u64 time_to_nsec(u32 value)
+{
+ return ((u64)value) * 128000ull;
+}
+
+/*
+ * Users are usually interested in average times,
+ * not accumulated time.
+ * This also helps us with atomicity problems
+ * when reading single values.
+ */
+static inline u64 time_to_avg_nsec(u32 value, u32 count)
+{
+ u64 ret;
+
+ /* no samples yet, avoid division by 0 */
+ if (count == 0)
+ return 0;
+
+ /* value comes in units of 128 µsec */
+ ret = time_to_nsec(value);
+ do_div(ret, count);
+
+ return ret;
+}
+
+/*
+ * Activate or deactivate the channel monitor. When area is NULL,
+ * the monitor is deactivated. The channel monitor needs to
+ * be active in order to measure subchannels, which also need
+ * to be enabled.
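+ * The schm instruction takes its operands in general registers 1
+ * (mode: 2 to activate, 0 to deactivate) and 2 (measurement area
+ * origin), which is why both registers are bound explicitly below.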
+ */
+static inline void cmf_activate(void *area, unsigned int onoff)
+{
+ register void * __gpr2 asm("2");
+ register long __gpr1 asm("1");
+
+ __gpr2 = area;
+ __gpr1 = onoff ? 2 : 0;
+ /* activate channel measurement */
+ asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
+}
+
+static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
+ unsigned long address)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ sch->config.mme = mme;
+ sch->config.mbfc = mbfc;
+ /* address can be either a block address or a block index */
+ if (mbfc)
+ sch->config.mba = address;
+ else
+ sch->config.mbi = address;
+
+ return cio_commit_config(sch);
+}
+
+struct set_schib_struct {
+ u32 mme;
+ int mbfc;
+ unsigned long address;
+ wait_queue_head_t wait;
+ int ret;
+ struct kref kref;
+};
+
+static void cmf_set_schib_release(struct kref *kref)
+{
+ struct set_schib_struct *set_data;
+
+ set_data = container_of(kref, struct set_schib_struct, kref);
+ kfree(set_data);
+}
+
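+/* value stored in ->ret while the requested change is outstanding */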
+#define CMF_PENDING 1
+
+static int set_schib_wait(struct ccw_device *cdev, u32 mme,
+ int mbfc, unsigned long address)
+{
+ struct set_schib_struct *set_data;
+ int ret;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (!cdev->private->cmb) {
+ ret = -ENODEV;
+ goto out;
+ }
+ set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
+ if (!set_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ init_waitqueue_head(&set_data->wait);
+ kref_init(&set_data->kref);
+ set_data->mme = mme;
+ set_data->mbfc = mbfc;
+ set_data->address = address;
+
+ ret = set_schib(cdev, mme, mbfc, address);
+ if (ret != -EBUSY)
+ goto out_put;
+
+ if (cdev->private->state != DEV_STATE_ONLINE) {
+ /* if the device is not online, don't even try again */
+ ret = -EBUSY;
+ goto out_put;
+ }
+
+ cdev->private->state = DEV_STATE_CMFCHANGE;
+ set_data->ret = CMF_PENDING;
+ cdev->private->cmb_wait = set_data;
+
+ spin_unlock_irq(cdev->ccwlock);
+ if (wait_event_interruptible(set_data->wait,
+ set_data->ret != CMF_PENDING)) {
+ spin_lock_irq(cdev->ccwlock);
+ if (set_data->ret == CMF_PENDING) {
+ set_data->ret = -ERESTARTSYS;
+ if (cdev->private->state == DEV_STATE_CMFCHANGE)
+ cdev->private->state = DEV_STATE_ONLINE;
+ }
+ spin_unlock_irq(cdev->ccwlock);
+ }
+ spin_lock_irq(cdev->ccwlock);
+ cdev->private->cmb_wait = NULL;
+ ret = set_data->ret;
+out_put:
+ kref_put(&set_data->kref, cmf_set_schib_release);
+out:
+ spin_unlock_irq(cdev->ccwlock);
+ return ret;
+}
+
+void retry_set_schib(struct ccw_device *cdev)
+{
+ struct set_schib_struct *set_data;
+
+ set_data = cdev->private->cmb_wait;
+ if (!set_data) {
+ WARN_ON(1);
+ return;
+ }
+ kref_get(&set_data->kref);
+ set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
+ set_data->address);
+ wake_up(&set_data->wait);
+ kref_put(&set_data->kref, cmf_set_schib_release);
+}
+
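+/*
+ * Copy the hardware measurement block to last_block. Since the
+ * hardware may update the block concurrently, the copy is repeated
+ * until two consecutive reads yield identical contents.
+ */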
+static int cmf_copy_block(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ void *reference_buf;
+ void *hw_block;
+ struct cmb_data *cmb_data;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
+ /* Don't copy if a start function is in progress. */
+ if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
+ (scsw_actl(&sch->schib.scsw) &
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
+ (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
+ return -EBUSY;
+ }
+ cmb_data = cdev->private->cmb;
+ hw_block = cmbops->align(cmb_data->hw_block);
+ if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
+ /* No need to copy. */
+ return 0;
+ reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
+ if (!reference_buf)
+ return -ENOMEM;
+ /* Ensure consistency of block copied from hardware. */
+ do {
+ memcpy(cmb_data->last_block, hw_block, cmb_data->size);
+ memcpy(reference_buf, hw_block, cmb_data->size);
+ } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+ cmb_data->last_update = get_tod_clock();
+ kfree(reference_buf);
+ return 0;
+}
+
+struct copy_block_struct {
+ wait_queue_head_t wait;
+ int ret;
+ struct kref kref;
+};
+
+static void cmf_copy_block_release(struct kref *kref)
+{
+ struct copy_block_struct *copy_block;
+
+ copy_block = container_of(kref, struct copy_block_struct, kref);
+ kfree(copy_block);
+}
+
+static int cmf_cmb_copy_wait(struct ccw_device *cdev)
+{
+ struct copy_block_struct *copy_block;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ ret = -ENODEV;
+ goto out;
+ }
+ copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
+ if (!copy_block) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ init_waitqueue_head(&copy_block->wait);
+ kref_init(&copy_block->kref);
+
+ ret = cmf_copy_block(cdev);
+ if (ret != -EBUSY)
+ goto out_put;
+
+ if (cdev->private->state != DEV_STATE_ONLINE) {
+ ret = -EBUSY;
+ goto out_put;
+ }
+
+ cdev->private->state = DEV_STATE_CMFUPDATE;
+ copy_block->ret = CMF_PENDING;
+ cdev->private->cmb_wait = copy_block;
+
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ if (wait_event_interruptible(copy_block->wait,
+ copy_block->ret != CMF_PENDING)) {
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (copy_block->ret == CMF_PENDING) {
+ copy_block->ret = -ERESTARTSYS;
+ if (cdev->private->state == DEV_STATE_CMFUPDATE)
+ cdev->private->state = DEV_STATE_ONLINE;
+ }
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ }
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cdev->private->cmb_wait = NULL;
+ ret = copy_block->ret;
+out_put:
+ kref_put(&copy_block->kref, cmf_copy_block_release);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+void cmf_retry_copy_block(struct ccw_device *cdev)
+{
+ struct copy_block_struct *copy_block;
+
+ copy_block = cdev->private->cmb_wait;
+ if (!copy_block) {
+ WARN_ON(1);
+ return;
+ }
+ kref_get(&copy_block->kref);
+ copy_block->ret = cmf_copy_block(cdev);
+ wake_up(&copy_block->wait);
+ kref_put(&copy_block->kref, cmf_copy_block_release);
+}
+
+static void cmf_generic_reset(struct ccw_device *cdev)
+{
+ struct cmb_data *cmb_data;
+
+ spin_lock_irq(cdev->ccwlock);
+ cmb_data = cdev->private->cmb;
+ if (cmb_data) {
+ memset(cmb_data->last_block, 0, cmb_data->size);
+ /*
+ * Need to reset hw block as well to make the hardware start
+ * from 0 again.
+ */
+ memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
+ cmb_data->last_update = 0;
+ }
+ cdev->private->cmb_start_time = get_tod_clock();
+ spin_unlock_irq(cdev->ccwlock);
+}
+
+/**
+ * struct cmb_area - container for global cmb data
+ *
+ * @mem: pointer to CMBs (only in basic measurement mode)
+ * @list: contains a linked list of all subchannels
+ * @num_channels: number of channels to be measured
+ * @lock: protect concurrent access to @mem and @list
+ */
+struct cmb_area {
+ struct cmb *mem;
+ struct list_head list;
+ int num_channels;
+ spinlock_t lock;
+};
+
+static struct cmb_area cmb_area = {
+ .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
+ .list = LIST_HEAD_INIT(cmb_area.list),
+ .num_channels = 1024,
+};
+
+/* ****** old style CMB handling ********/
+
+/*
+ * Basic channel measurement blocks are allocated in one contiguous
+ * block of memory, which can not be moved as long as any channel
+ * is active. Therefore, a maximum number of subchannels needs to
+ * be defined somewhere. This is a module parameter, defaulting to
+ * a reasonable value of 1024, or 32 KB of memory.
+ * Current kernels don't allow kmalloc with more than 128 KB, so the
+ * maximum is 4096.
+ */
+
+module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
+
+/**
+ * struct cmb - basic channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @reserved: unused in basic measurement mode
+ *
+ * The measurement block as used by the hardware. The fields are described
+ * further in z/Architecture Principles of Operation, chapter 17.
+ *
+ * The cmb area made up of these blocks must be a contiguous array and may
+ * not be reallocated or freed.
+ * Only one cmb area can be present in the system.
+ */
+struct cmb {
+ u16 ssch_rsch_count;
+ u16 sample_count;
+ u32 device_connect_time;
+ u32 function_pending_time;
+ u32 device_disconnect_time;
+ u32 control_unit_queuing_time;
+ u32 device_active_only_time;
+ u32 reserved[2];
+};
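+
+/*
+ * Size check: two u16 counters, five u32 timers and two reserved u32
+ * words make struct cmb 32 bytes, so the default of 1024 channels used
+ * by alloc_cmb() below occupies 1024 * 32 bytes = 32 KiB, as noted in
+ * the comment above.
+ */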
+
+/*
+ * Insert a single device into the cmb_area list.
+ * Called with cmb_area.lock held from alloc_cmb.
+ */
+static int alloc_cmb_single(struct ccw_device *cdev,
+ struct cmb_data *cmb_data)
+{
+ struct cmb *cmb;
+ struct ccw_device_private *node;
+ int ret;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (!list_empty(&cdev->private->cmb_list)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Find first unused cmb in cmb_area.mem.
+ * This is a little tricky: cmb_area.list
+	 * remains sorted by ->cmb->hw_block pointers.
+ */
+ cmb = cmb_area.mem;
+ list_for_each_entry(node, &cmb_area.list, cmb_list) {
+ struct cmb_data *data;
+ data = node->cmb;
+		if ((struct cmb *)data->hw_block > cmb)
+ break;
+ cmb++;
+ }
+ if (cmb - cmb_area.mem >= cmb_area.num_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* insert new cmb */
+ list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
+ cmb_data->hw_block = cmb;
+ cdev->private->cmb = cmb_data;
+ ret = 0;
+out:
+ spin_unlock_irq(cdev->ccwlock);
+ return ret;
+}
+
+static int alloc_cmb(struct ccw_device *cdev)
+{
+ int ret;
+ struct cmb *mem;
+ ssize_t size;
+ struct cmb_data *cmb_data;
+
+ /* Allocate private cmb_data. */
+ cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+ if (!cmb_data)
+ return -ENOMEM;
+
+ cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
+ if (!cmb_data->last_block) {
+ kfree(cmb_data);
+ return -ENOMEM;
+ }
+ cmb_data->size = sizeof(struct cmb);
+ spin_lock(&cmb_area.lock);
+
+ if (!cmb_area.mem) {
+ /* there is no user yet, so we need a new area */
+ size = sizeof(struct cmb) * cmb_area.num_channels;
+ WARN_ON(!list_empty(&cmb_area.list));
+
+ spin_unlock(&cmb_area.lock);
+		mem = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+ get_order(size));
+ spin_lock(&cmb_area.lock);
+
+ if (cmb_area.mem) {
+ /* ok, another thread was faster */
+ free_pages((unsigned long)mem, get_order(size));
+ } else if (!mem) {
+ /* no luck */
+ ret = -ENOMEM;
+ goto out;
+ } else {
+ /* everything ok */
+ memset(mem, 0, size);
+ cmb_area.mem = mem;
+ cmf_activate(cmb_area.mem, 1);
+ }
+ }
+
+ /* do the actual allocation */
+ ret = alloc_cmb_single(cdev, cmb_data);
+out:
+ spin_unlock(&cmb_area.lock);
+ if (ret) {
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ }
+ return ret;
+}
+
+static void free_cmb(struct ccw_device *cdev)
+{
+ struct ccw_device_private *priv;
+ struct cmb_data *cmb_data;
+
+ spin_lock(&cmb_area.lock);
+ spin_lock_irq(cdev->ccwlock);
+
+ priv = cdev->private;
+
+ if (list_empty(&priv->cmb_list)) {
+ /* already freed */
+ goto out;
+ }
+
+ cmb_data = priv->cmb;
+ priv->cmb = NULL;
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ list_del_init(&priv->cmb_list);
+
+ if (list_empty(&cmb_area.list)) {
+ ssize_t size;
+ size = sizeof(struct cmb) * cmb_area.num_channels;
+ cmf_activate(NULL, 0);
+ free_pages((unsigned long)cmb_area.mem, get_order(size));
+ cmb_area.mem = NULL;
+ }
+out:
+ spin_unlock_irq(cdev->ccwlock);
+ spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmb(struct ccw_device *cdev, u32 mme)
+{
+ u16 offset;
+ struct cmb_data *cmb_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return -EINVAL;
+ }
+ cmb_data = cdev->private->cmb;
+ offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+ return set_schib_wait(cdev, mme, 0, offset);
+}
+
+static u64 read_cmb(struct ccw_device *cdev, int index)
+{
+ struct cmb *cmb;
+ u32 val;
+ int ret;
+ unsigned long flags;
+
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return 0;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ ret = 0;
+ goto out;
+ }
+ cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
+
+ switch (index) {
+ case cmb_ssch_rsch_count:
+ ret = cmb->ssch_rsch_count;
+ goto out;
+ case cmb_sample_count:
+ ret = cmb->sample_count;
+ goto out;
+ case cmb_device_connect_time:
+ val = cmb->device_connect_time;
+ break;
+ case cmb_function_pending_time:
+ val = cmb->function_pending_time;
+ break;
+ case cmb_device_disconnect_time:
+ val = cmb->device_disconnect_time;
+ break;
+ case cmb_control_unit_queuing_time:
+ val = cmb->control_unit_queuing_time;
+ break;
+ case cmb_device_active_only_time:
+ val = cmb->device_active_only_time;
+ break;
+ default:
+ ret = 0;
+ goto out;
+ }
+ ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
+{
+ struct cmb *cmb;
+ struct cmb_data *cmb_data;
+ u64 time;
+ unsigned long flags;
+ int ret;
+
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return ret;
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (cmb_data->last_update == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ cmb = cmb_data->last_block;
+ time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+ memset(data, 0, sizeof(struct cmbdata));
+
+ /* we only know values before device_busy_time */
+ data->size = offsetof(struct cmbdata, device_busy_time);
+
+	/* convert TOD delta to nanoseconds: one TOD unit is 1/4096 us */
+ data->elapsed_time = (time * 1000) >> 12;
+
+ /* copy data to new structure */
+ data->ssch_rsch_count = cmb->ssch_rsch_count;
+ data->sample_count = cmb->sample_count;
+
+ /* time fields are converted to nanoseconds while copying */
+ data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+ data->device_disconnect_time =
+ time_to_nsec(cmb->device_disconnect_time);
+ data->control_unit_queuing_time
+ = time_to_nsec(cmb->control_unit_queuing_time);
+ data->device_active_only_time
+ = time_to_nsec(cmb->device_active_only_time);
+ ret = 0;
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static void reset_cmb(struct ccw_device *cdev)
+{
+ cmf_generic_reset(cdev);
+}
+
+static void *align_cmb(void *area)
+{
+ return area;
+}
+
+static struct attribute_group cmf_attr_group;
+
+static struct cmb_operations cmbops_basic = {
+ .alloc = alloc_cmb,
+ .free = free_cmb,
+ .set = set_cmb,
+ .read = read_cmb,
+ .readall = readall_cmb,
+ .reset = reset_cmb,
+ .align = align_cmb,
+ .attr_group = &cmf_attr_group,
+};
+
+/* ******** extended cmb handling ********/
+
+/**
+ * struct cmbe - extended channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @device_busy_time: time of device busy
+ * @initial_command_response_time: initial command response time
+ * @reserved: unused
+ *
+ * The measurement block as used by the hardware. May be in any 64 bit physical
+ * location.
+ * The fields are described further in z/Architecture Principles of Operation,
+ * third edition, chapter 17.
+ */
+struct cmbe {
+ u32 ssch_rsch_count;
+ u32 sample_count;
+ u32 device_connect_time;
+ u32 function_pending_time;
+ u32 device_disconnect_time;
+ u32 control_unit_queuing_time;
+ u32 device_active_only_time;
+ u32 device_busy_time;
+ u32 initial_command_response_time;
+ u32 reserved[7];
+};
+
+/*
+ * kmalloc only guarantees 8-byte alignment, but we need cmbe
+ * pointers to be naturally aligned. Make sure to allocate
+ * enough space for two cmbes.
+ */
+static inline struct cmbe *cmbe_align(struct cmbe *c)
+{
+ unsigned long addr;
+	addr = ((unsigned long)c + sizeof(struct cmbe) - sizeof(long)) &
+	       ~(sizeof(struct cmbe) - sizeof(long));
+	return (struct cmbe *)addr;
+}
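+
+/*
+ * Worked example (assuming a 64-bit build, so sizeof(long) == 8 and
+ * sizeof(struct cmbe) == 64): for a kmalloc result c == 0x1008, the
+ * computation is (0x1008 + 64 - 8) & ~(64 - 8) == 0x1040 & ~0x38
+ * == 0x1040, i.e. the pointer is rounded up to the next 64-byte
+ * boundary. This only works because kmalloc already guarantees that
+ * the low three bits of c are zero.
+ */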
+
+static int alloc_cmbe(struct ccw_device *cdev)
+{
+ struct cmbe *cmbe;
+ struct cmb_data *cmb_data;
+ int ret;
+
+	cmbe = kzalloc(sizeof(*cmbe) * 2, GFP_KERNEL);
+ if (!cmbe)
+ return -ENOMEM;
+ cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+ if (!cmb_data) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+ if (!cmb_data->last_block) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ cmb_data->size = sizeof(struct cmbe);
+ spin_lock_irq(cdev->ccwlock);
+ if (cdev->private->cmb) {
+ spin_unlock_irq(cdev->ccwlock);
+ ret = -EBUSY;
+ goto out_free;
+ }
+ cmb_data->hw_block = cmbe;
+ cdev->private->cmb = cmb_data;
+ spin_unlock_irq(cdev->ccwlock);
+
+ /* activate global measurement if this is the first channel */
+ spin_lock(&cmb_area.lock);
+ if (list_empty(&cmb_area.list))
+ cmf_activate(NULL, 1);
+ list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
+ spin_unlock(&cmb_area.lock);
+
+ return 0;
+out_free:
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ kfree(cmbe);
+ return ret;
+}
+
+static void free_cmbe(struct ccw_device *cdev)
+{
+ struct cmb_data *cmb_data;
+
+ spin_lock_irq(cdev->ccwlock);
+ cmb_data = cdev->private->cmb;
+ cdev->private->cmb = NULL;
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ spin_unlock_irq(cdev->ccwlock);
+
+ /* deactivate global measurement if this is the last channel */
+ spin_lock(&cmb_area.lock);
+ list_del_init(&cdev->private->cmb_list);
+ if (list_empty(&cmb_area.list))
+ cmf_activate(NULL, 0);
+ spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
+{
+ unsigned long mba;
+ struct cmb_data *cmb_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return -EINVAL;
+ }
+ cmb_data = cdev->private->cmb;
+ mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+ return set_schib_wait(cdev, mme, 1, mba);
+}
+
+static u64 read_cmbe(struct ccw_device *cdev, int index)
+{
+ struct cmbe *cmb;
+ struct cmb_data *cmb_data;
+ u32 val;
+ int ret;
+ unsigned long flags;
+
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return 0;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = 0;
+ goto out;
+ }
+ cmb = cmb_data->last_block;
+
+ switch (index) {
+ case cmb_ssch_rsch_count:
+ ret = cmb->ssch_rsch_count;
+ goto out;
+ case cmb_sample_count:
+ ret = cmb->sample_count;
+ goto out;
+ case cmb_device_connect_time:
+ val = cmb->device_connect_time;
+ break;
+ case cmb_function_pending_time:
+ val = cmb->function_pending_time;
+ break;
+ case cmb_device_disconnect_time:
+ val = cmb->device_disconnect_time;
+ break;
+ case cmb_control_unit_queuing_time:
+ val = cmb->control_unit_queuing_time;
+ break;
+ case cmb_device_active_only_time:
+ val = cmb->device_active_only_time;
+ break;
+ case cmb_device_busy_time:
+ val = cmb->device_busy_time;
+ break;
+ case cmb_initial_command_response_time:
+ val = cmb->initial_command_response_time;
+ break;
+ default:
+ ret = 0;
+ goto out;
+ }
+ ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
+{
+ struct cmbe *cmb;
+ struct cmb_data *cmb_data;
+ u64 time;
+ unsigned long flags;
+ int ret;
+
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return ret;
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (cmb_data->last_update == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+	memset(data, 0, sizeof(struct cmbdata));
+
+ /* we only know values before device_busy_time */
+ data->size = offsetof(struct cmbdata, device_busy_time);
+
+	/* convert TOD delta to nanoseconds: one TOD unit is 1/4096 us */
+ data->elapsed_time = (time * 1000) >> 12;
+
+ cmb = cmb_data->last_block;
+ /* copy data to new structure */
+ data->ssch_rsch_count = cmb->ssch_rsch_count;
+ data->sample_count = cmb->sample_count;
+
+ /* time fields are converted to nanoseconds while copying */
+ data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+ data->device_disconnect_time =
+ time_to_nsec(cmb->device_disconnect_time);
+ data->control_unit_queuing_time
+ = time_to_nsec(cmb->control_unit_queuing_time);
+ data->device_active_only_time
+ = time_to_nsec(cmb->device_active_only_time);
+ data->device_busy_time = time_to_nsec(cmb->device_busy_time);
+ data->initial_command_response_time
+ = time_to_nsec(cmb->initial_command_response_time);
+
+ ret = 0;
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static void reset_cmbe(struct ccw_device *cdev)
+{
+ cmf_generic_reset(cdev);
+}
+
+static void *align_cmbe(void *area)
+{
+ return cmbe_align(area);
+}
+
+static struct attribute_group cmf_attr_group_ext;
+
+static struct cmb_operations cmbops_extended = {
+ .alloc = alloc_cmbe,
+ .free = free_cmbe,
+ .set = set_cmbe,
+ .read = read_cmbe,
+ .readall = readall_cmbe,
+ .reset = reset_cmbe,
+ .align = align_cmbe,
+ .attr_group = &cmf_attr_group_ext,
+};
+
+static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
+{
+ return sprintf(buf, "%lld\n",
+ (unsigned long long) cmf_read(to_ccwdev(dev), idx));
+}
+
+static ssize_t cmb_show_avg_sample_interval(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccw_device *cdev;
+ long interval;
+ unsigned long count;
+ struct cmb_data *cmb_data;
+
+ cdev = to_ccwdev(dev);
+ count = cmf_read(cdev, cmb_sample_count);
+ spin_lock_irq(cdev->ccwlock);
+ cmb_data = cdev->private->cmb;
+	if (count && cmb_data) {
+ interval = cmb_data->last_update -
+ cdev->private->cmb_start_time;
+ interval = (interval * 1000) >> 12;
+ interval /= count;
+ } else
+ interval = -1;
+ spin_unlock_irq(cdev->ccwlock);
+ return sprintf(buf, "%ld\n", interval);
+}
+
+static ssize_t cmb_show_avg_utilization(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cmbdata data;
+ u64 utilization;
+ unsigned long t, u;
+ int ret;
+
+ ret = cmf_readall(to_ccwdev(dev), &data);
+ if (ret == -EAGAIN || ret == -ENODEV)
+ /* No data (yet/currently) available to use for calculation. */
+ return sprintf(buf, "n/a\n");
+ else if (ret)
+ return ret;
+
+ utilization = data.device_connect_time +
+ data.function_pending_time +
+ data.device_disconnect_time;
+
+	/*
+	 * Shift to avoid a 64-bit division: on 32-bit builds, reduce both
+	 * values until they fit into an unsigned long; the condition can
+	 * only be true while one of them still exceeds ULONG_MAX.
+	 */
+	while (-1ul < (data.elapsed_time | utilization)) {
+		utilization >>= 8;
+		data.elapsed_time >>= 8;
+	}
+
+	/* calculate value in 0.1 percent units */
+	t = (unsigned long) data.elapsed_time / 1000;
+	if (!t)
+		return sprintf(buf, "n/a\n");
+	u = (unsigned long) utilization / t;
+
+	return sprintf(buf, "%02ld.%01ld%%\n", u / 10, u % 10);
+}
+
+#define cmf_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(name, 0444, show_##name, NULL);
+
+#define cmf_attr_avg(name) \
+static ssize_t show_avg_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
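+
+/*
+ * For example, "cmf_attr(sample_count)" below expands to a
+ * show_sample_count() wrapper around cmb_show_attr(dev, buf,
+ * cmb_sample_count) plus a read-only dev_attr_sample_count, which is
+ * what the attribute lists further down reference.
+ */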
+
+cmf_attr(ssch_rsch_count);
+cmf_attr(sample_count);
+cmf_attr_avg(device_connect_time);
+cmf_attr_avg(function_pending_time);
+cmf_attr_avg(device_disconnect_time);
+cmf_attr_avg(control_unit_queuing_time);
+cmf_attr_avg(device_active_only_time);
+cmf_attr_avg(device_busy_time);
+cmf_attr_avg(initial_command_response_time);
+
+static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
+ NULL);
+static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
+
+static struct attribute *cmf_attributes[] = {
+ &dev_attr_avg_sample_interval.attr,
+ &dev_attr_avg_utilization.attr,
+ &dev_attr_ssch_rsch_count.attr,
+ &dev_attr_sample_count.attr,
+ &dev_attr_avg_device_connect_time.attr,
+ &dev_attr_avg_function_pending_time.attr,
+ &dev_attr_avg_device_disconnect_time.attr,
+ &dev_attr_avg_control_unit_queuing_time.attr,
+ &dev_attr_avg_device_active_only_time.attr,
+ NULL,
+};
+
+static struct attribute_group cmf_attr_group = {
+ .name = "cmf",
+ .attrs = cmf_attributes,
+};
+
+static struct attribute *cmf_attributes_ext[] = {
+ &dev_attr_avg_sample_interval.attr,
+ &dev_attr_avg_utilization.attr,
+ &dev_attr_ssch_rsch_count.attr,
+ &dev_attr_sample_count.attr,
+ &dev_attr_avg_device_connect_time.attr,
+ &dev_attr_avg_function_pending_time.attr,
+ &dev_attr_avg_device_disconnect_time.attr,
+ &dev_attr_avg_control_unit_queuing_time.attr,
+ &dev_attr_avg_device_active_only_time.attr,
+ &dev_attr_avg_device_busy_time.attr,
+ &dev_attr_avg_initial_command_response_time.attr,
+ NULL,
+};
+
+static struct attribute_group cmf_attr_group_ext = {
+ .name = "cmf",
+ .attrs = cmf_attributes_ext,
+};
+
+static ssize_t cmb_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
+}
+
+static ssize_t cmb_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t c)
+{
+ struct ccw_device *cdev;
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ cdev = to_ccwdev(dev);
+
+	switch (val) {
+	case 0:
+		ret = disable_cmf(cdev);
+		break;
+	case 1:
+		ret = enable_cmf(cdev);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret ? ret : c;
+}
+
+DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
+
+int ccw_set_cmf(struct ccw_device *cdev, int enable)
+{
+ return cmbops->set(cdev, enable ? 2 : 0);
+}
+
+/**
+ * enable_cmf() - switch on the channel measurement for a specific device
+ * @cdev: The ccw device to be enabled
+ *
+ * Returns %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic
+ */
+int enable_cmf(struct ccw_device *cdev)
+{
+ int ret;
+
+	ret = cmbops->alloc(cdev);
+	if (ret)
+		return ret;
+	cmbops->reset(cdev);
+ ret = cmbops->set(cdev, 2);
+ if (ret) {
+ cmbops->free(cdev);
+ return ret;
+ }
+ ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
+ if (!ret)
+ return 0;
+	cmbops->set(cdev, 0);	/* FIXME: this can fail */
+ cmbops->free(cdev);
+ return ret;
+}
+
+/**
+ * disable_cmf() - switch off the channel measurement for a specific device
+ * @cdev: The ccw device to be disabled
+ *
+ * Returns %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic
+ */
+int disable_cmf(struct ccw_device *cdev)
+{
+ int ret;
+
+ ret = cmbops->set(cdev, 0);
+ if (ret)
+ return ret;
+ cmbops->free(cdev);
+ sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
+ return ret;
+}
+
+/**
+ * cmf_read() - read one value from the current channel measurement block
+ * @cdev: the channel to be read
+ * @index: the index of the value to be read
+ *
+ * Returns the value read or %0 if the value cannot be read.
+ *
+ * Context:
+ * any
+ */
+u64 cmf_read(struct ccw_device *cdev, int index)
+{
+ return cmbops->read(cdev, index);
+}
+
+/**
+ * cmf_readall() - read the current channel measurement block
+ * @cdev: the channel to be read
+ * @data: a pointer to a data block that will be filled
+ *
+ * Returns %0 on success, a negative error value otherwise.
+ *
+ * Context:
+ * any
+ */
+int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
+{
+ return cmbops->readall(cdev, data);
+}
+
+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+ cmbops->reset(cdev);
+ return cmbops->set(cdev, 2);
+}
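+
+/*
+ * Illustrative usage (a sketch, not taken from any driver; any online
+ * ccw device "cdev" would do):
+ *
+ *	struct cmbdata data;
+ *
+ *	if (!enable_cmf(cdev)) {
+ *		if (!cmf_readall(cdev, &data))
+ *			pr_info("%llu ssch/rsch\n", (unsigned long long)
+ *				data.ssch_rsch_count);
+ *		disable_cmf(cdev);
+ *	}
+ */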
+
+static int __init init_cmf(void)
+{
+ char *format_string;
+ char *detect_string = "parameter";
+
+ /*
+ * If the user did not give a parameter, see if we are running on a
+ * machine supporting extended measurement blocks, otherwise fall back
+ * to basic mode.
+ */
+ if (format == CMF_AUTODETECT) {
+ if (!css_general_characteristics.ext_mb) {
+ format = CMF_BASIC;
+ } else {
+ format = CMF_EXTENDED;
+ }
+ detect_string = "autodetected";
+ } else {
+ detect_string = "parameter";
+ }
+
+ switch (format) {
+ case CMF_BASIC:
+ format_string = "basic";
+ cmbops = &cmbops_basic;
+ break;
+ case CMF_EXTENDED:
+ format_string = "extended";
+ cmbops = &cmbops_extended;
+ break;
+ default:
+ return 1;
+ }
+ pr_info("Channel measurement facility initialized using format "
+ "%s (mode %s)\n", format_string, detect_string);
+ return 0;
+}
+
+module_init(init_cmf);
+
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("channel measurement facility base driver\n"
+ "Copyright IBM Corp. 2003\n");
+
+EXPORT_SYMBOL_GPL(enable_cmf);
+EXPORT_SYMBOL_GPL(disable_cmf);
+EXPORT_SYMBOL_GPL(cmf_read);
+EXPORT_SYMBOL_GPL(cmf_readall);
diff --git a/kernel/drivers/s390/cio/crw.c b/kernel/drivers/s390/cio/crw.c
new file mode 100644
index 000000000..0f8a25f98
--- /dev/null
+++ b/kernel/drivers/s390/cio/crw.c
@@ -0,0 +1,161 @@
+/*
+ * Channel report handling code
+ *
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Cornelia Huck <cornelia.huck@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <asm/crw.h>
+#include <asm/ctl_reg.h>
+
+static DEFINE_MUTEX(crw_handler_mutex);
+static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
+
+/**
+ * crw_register_handler() - register a channel report word handler
+ * @rsc: reporting source code to handle
+ * @handler: handler to be registered
+ *
+ * Returns %0 on success and a negative error value otherwise.
+ */
+int crw_register_handler(int rsc, crw_handler_t handler)
+{
+ int rc = 0;
+
+ if ((rsc < 0) || (rsc >= NR_RSCS))
+ return -EINVAL;
+ mutex_lock(&crw_handler_mutex);
+ if (crw_handlers[rsc])
+ rc = -EBUSY;
+ else
+ crw_handlers[rsc] = handler;
+ mutex_unlock(&crw_handler_mutex);
+ return rc;
+}
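+
+/*
+ * For example, the channel subsystem in css.c registers its handler for
+ * subchannel report words with:
+ *
+ *	crw_register_handler(CRW_RSC_SCH, css_process_crw);
+ */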
+
+/**
+ * crw_unregister_handler() - unregister a channel report word handler
+ * @rsc: reporting source code to handle
+ */
+void crw_unregister_handler(int rsc)
+{
+ if ((rsc < 0) || (rsc >= NR_RSCS))
+ return;
+ mutex_lock(&crw_handler_mutex);
+ crw_handlers[rsc] = NULL;
+ mutex_unlock(&crw_handler_mutex);
+}
+
+/*
+ * Retrieve CRWs and call the registered handler functions.
+ */
+static int crw_collect_info(void *unused)
+{
+ struct crw crw[2];
+ int ccode, signal;
+ unsigned int chain;
+
+repeat:
+ signal = wait_event_interruptible(crw_handler_wait_q,
+ atomic_read(&crw_nr_req) > 0);
+ if (unlikely(signal))
+ atomic_inc(&crw_nr_req);
+ chain = 0;
+ while (1) {
+ crw_handler_t handler;
+
+ if (unlikely(chain > 1)) {
+ struct crw tmp_crw;
+
+			printk(KERN_WARNING "%s: Code does not support more "
+			       "than two chained crws; please report to "
+			       "linux390@de.ibm.com!\n", __func__);
+			ccode = stcrw(&tmp_crw);
+			printk(KERN_WARNING "%s: crw reports slct=%d, oflw=%d, "
+			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+			       __func__, tmp_crw.slct, tmp_crw.oflw,
+			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
+			       tmp_crw.erc, tmp_crw.rsid);
+			printk(KERN_WARNING "%s: This was crw number %x in the "
+			       "chain\n", __func__, chain);
+ if (ccode != 0)
+ break;
+ chain = tmp_crw.chn ? chain + 1 : 0;
+ continue;
+ }
+ ccode = stcrw(&crw[chain]);
+ if (ccode != 0)
+ break;
+ printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw[chain].slct, crw[chain].oflw, crw[chain].chn,
+ crw[chain].rsc, crw[chain].anc, crw[chain].erc,
+ crw[chain].rsid);
+ /* Check for overflows. */
+ if (crw[chain].oflw) {
+ int i;
+
+ pr_debug("%s: crw overflow detected!\n", __func__);
+ mutex_lock(&crw_handler_mutex);
+ for (i = 0; i < NR_RSCS; i++) {
+ if (crw_handlers[i])
+ crw_handlers[i](NULL, NULL, 1);
+ }
+ mutex_unlock(&crw_handler_mutex);
+ chain = 0;
+ continue;
+ }
+ if (crw[0].chn && !chain) {
+ chain++;
+ continue;
+ }
+ mutex_lock(&crw_handler_mutex);
+ handler = crw_handlers[crw[chain].rsc];
+ if (handler)
+ handler(&crw[0], chain ? &crw[1] : NULL, 0);
+ mutex_unlock(&crw_handler_mutex);
+ /* chain is always 0 or 1 here. */
+ chain = crw[chain].chn ? chain + 1 : 0;
+ }
+ if (atomic_dec_and_test(&crw_nr_req))
+ wake_up(&crw_handler_wait_q);
+ goto repeat;
+ return 0;
+}
+
+void crw_handle_channel_report(void)
+{
+ atomic_inc(&crw_nr_req);
+ wake_up(&crw_handler_wait_q);
+}
+
+void crw_wait_for_channel_report(void)
+{
+ crw_handle_channel_report();
+ wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
+}
+
+/*
+ * Machine checks for the channel subsystem must be enabled
+ * after the channel subsystem is initialized.
+ */
+static int __init crw_machine_check_init(void)
+{
+ struct task_struct *task;
+
+ task = kthread_run(crw_collect_info, NULL, "kmcheck");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ ctl_set_bit(14, 28); /* enable channel report MCH */
+ return 0;
+}
+device_initcall(crw_machine_check_init);
diff --git a/kernel/drivers/s390/cio/css.c b/kernel/drivers/s390/cio/css.c
new file mode 100644
index 000000000..0268e5fd5
--- /dev/null
+++ b/kernel/drivers/s390/cio/css.c
@@ -0,0 +1,1294 @@
+/*
+ * driver for channel subsystem
+ *
+ * Copyright IBM Corp. 2002, 2010
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/proc_fs.h>
+#include <asm/isc.h>
+#include <asm/crw.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "device.h"
+#include "idset.h"
+#include "chp.h"
+
+int css_init_done = 0;
+int max_ssid;
+
+struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
+
+int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+ struct subchannel_id schid;
+ int ret;
+
+ init_subchannel_id(&schid);
+ ret = -ENODEV;
+ do {
+ do {
+ ret = fn(schid, data);
+ if (ret)
+ break;
+ } while (schid.sch_no++ < __MAX_SUBCHANNEL);
+ schid.sch_no = 0;
+ } while (schid.ssid++ < max_ssid);
+ return ret;
+}
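+
+/*
+ * Example callback (a sketch; "count_sch" and "count" are hypothetical):
+ * returning zero continues the iteration over the whole ssid/sch_no
+ * space, while any non-zero value stops it and is returned to the caller.
+ *
+ *	static int count_sch(struct subchannel_id schid, void *data)
+ *	{
+ *		(*(int *) data)++;
+ *		return 0;
+ *	}
+ *
+ *	int count = 0;
+ *	for_each_subchannel(count_sch, &count);
+ */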
+
+struct cb_data {
+ void *data;
+ struct idset *set;
+ int (*fn_known_sch)(struct subchannel *, void *);
+ int (*fn_unknown_sch)(struct subchannel_id, void *);
+};
+
+static int call_fn_known_sch(struct device *dev, void *data)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct cb_data *cb = data;
+ int rc = 0;
+
+ if (cb->set)
+ idset_sch_del(cb->set, sch->schid);
+ if (cb->fn_known_sch)
+ rc = cb->fn_known_sch(sch, cb->data);
+ return rc;
+}
+
+static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
+{
+ struct cb_data *cb = data;
+ int rc = 0;
+
+ if (idset_sch_contains(cb->set, schid))
+ rc = cb->fn_unknown_sch(schid, cb->data);
+ return rc;
+}
+
+static int call_fn_all_sch(struct subchannel_id schid, void *data)
+{
+ struct cb_data *cb = data;
+ struct subchannel *sch;
+ int rc = 0;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ if (cb->fn_known_sch)
+ rc = cb->fn_known_sch(sch, cb->data);
+ put_device(&sch->dev);
+ } else {
+ if (cb->fn_unknown_sch)
+ rc = cb->fn_unknown_sch(schid, cb->data);
+ }
+
+ return rc;
+}
+
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+ int (*fn_unknown)(struct subchannel_id,
+ void *), void *data)
+{
+ struct cb_data cb;
+ int rc;
+
+ cb.data = data;
+ cb.fn_known_sch = fn_known;
+ cb.fn_unknown_sch = fn_unknown;
+
+ if (fn_known && !fn_unknown) {
+ /* Skip idset allocation in case of known-only loop. */
+ cb.set = NULL;
+ return bus_for_each_dev(&css_bus_type, NULL, &cb,
+ call_fn_known_sch);
+ }
+
+ cb.set = idset_sch_new();
+ if (!cb.set)
+ /* fall back to brute force scanning in case of oom */
+ return for_each_subchannel(call_fn_all_sch, &cb);
+
+ idset_fill(cb.set);
+
+ /* Process registered subchannels. */
+ rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
+ if (rc)
+ goto out;
+ /* Process unregistered subchannels. */
+ if (fn_unknown)
+ rc = for_each_subchannel(call_fn_unknown_sch, &cb);
+out:
+ idset_free(cb.set);
+
+ return rc;
+}
+
+static void css_sch_todo(struct work_struct *work);
+
+static int css_sch_create_locks(struct subchannel *sch)
+{
+ sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
+
+ spin_lock_init(sch->lock);
+ mutex_init(&sch->reg_mutex);
+
+ return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
+ kfree(sch->lock);
+ kfree(sch);
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+ if (!sch)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cio_validate_subchannel(sch, schid);
+ if (ret < 0)
+ goto err;
+
+ ret = css_sch_create_locks(sch);
+ if (ret)
+ goto err;
+
+ INIT_WORK(&sch->todo_work, css_sch_todo);
+ sch->dev.release = &css_subchannel_release;
+ device_initialize(&sch->dev);
+ return sch;
+
+err:
+ kfree(sch);
+ return ERR_PTR(ret);
+}
+
+static int css_sch_device_register(struct subchannel *sch)
+{
+ int ret;
+
+ mutex_lock(&sch->reg_mutex);
+ dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
+ sch->schid.sch_no);
+ ret = device_add(&sch->dev);
+ mutex_unlock(&sch->reg_mutex);
+ return ret;
+}
+
+/**
+ * css_sch_device_unregister - unregister a subchannel
+ * @sch: subchannel to be unregistered
+ */
+void css_sch_device_unregister(struct subchannel *sch)
+{
+ mutex_lock(&sch->reg_mutex);
+ if (device_is_registered(&sch->dev))
+ device_unregister(&sch->dev);
+ mutex_unlock(&sch->reg_mutex);
+}
+EXPORT_SYMBOL_GPL(css_sch_device_unregister);
+
+static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
+{
+ int i;
+ int mask;
+
+ memset(ssd, 0, sizeof(struct chsc_ssd_info));
+ ssd->path_mask = pmcw->pim;
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (pmcw->pim & mask) {
+ chp_id_init(&ssd->chpid[i]);
+ ssd->chpid[i].id = pmcw->chpid[i];
+ }
+ }
+}
+
+static void ssd_register_chpids(struct chsc_ssd_info *ssd)
+{
+ int i;
+ int mask;
+
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (ssd->path_mask & mask)
+ if (!chp_is_registered(ssd->chpid[i]))
+ chp_new(ssd->chpid[i]);
+ }
+}
+
+void css_update_ssd_info(struct subchannel *sch)
+{
+ int ret;
+
+ ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+ if (ret)
+ ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+
+ ssd_register_chpids(&sch->ssd_info);
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%01x\n", sch->st);
+}
+
+static DEVICE_ATTR(type, 0444, type_show, NULL);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "css:t%01X\n", sch->st);
+}
+
+static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
+
+static struct attribute *subch_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+
+static struct attribute_group subch_attr_group = {
+ .attrs = subch_attrs,
+};
+
+static const struct attribute_group *default_subch_attr_groups[] = {
+ &subch_attr_group,
+ NULL,
+};
+
+int css_register_subchannel(struct subchannel *sch)
+{
+ int ret;
+
+ /* Initialize the subchannel structure */
+ sch->dev.parent = &channel_subsystems[0]->device;
+ sch->dev.bus = &css_bus_type;
+ sch->dev.groups = default_subch_attr_groups;
+ /*
+ * We don't want to generate uevents for I/O subchannels that don't
+ * have a working ccw device behind them since they will be
+ * unregistered before they can be used anyway, so we delay the add
+	 * uevent until after device recognition has succeeded.
+ * Note that we suppress the uevent for all subchannel types;
+ * the subchannel driver can decide itself when it wants to inform
+ * userspace of its existence.
+ */
+ dev_set_uevent_suppress(&sch->dev, 1);
+ css_update_ssd_info(sch);
+ /* make it known to the system */
+ ret = css_sch_device_register(sch);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ return ret;
+ }
+ if (!sch->driver) {
+ /*
+ * No driver matched. Generate the uevent now so that
+ * a fitting driver module may be loaded based on the
+ * modalias.
+ */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+ return ret;
+}
+
+static int css_probe_device(struct subchannel_id schid)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return PTR_ERR(sch);
+
+ ret = css_register_subchannel(sch);
+ if (ret)
+ put_device(&sch->dev);
+
+ return ret;
+}
+
+static int
+check_subchannel(struct device *dev, void *data)
+{
+ struct subchannel *sch;
+ struct subchannel_id *schid = data;
+
+ sch = to_subchannel(dev);
+ return schid_equal(&sch->schid, schid);
+}
+
+struct subchannel *
+get_subchannel_by_schid(struct subchannel_id schid)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&css_bus_type, NULL,
+ &schid, check_subchannel);
+
+ return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * css_sch_is_valid() - check if a subchannel is valid
+ * @schib: subchannel information block for the subchannel
+ */
+int css_sch_is_valid(struct schib *schib)
+{
+ if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
+ return 0;
+ if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(css_sch_is_valid);
+
+static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
+{
+ struct schib schib;
+
+ if (!slow) {
+ /* Will be done on the slow path. */
+ return -EAGAIN;
+ }
+ if (stsch_err(schid, &schib)) {
+ /* Subchannel is not provided. */
+ return -ENXIO;
+ }
+ if (!css_sch_is_valid(&schib)) {
+ /* Unusable - ignore. */
+ return 0;
+ }
+ CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
+ schid.sch_no);
+
+ return css_probe_device(schid);
+}
+
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
+{
+ int ret = 0;
+
+ if (sch->driver) {
+ if (sch->driver->sch_event)
+ ret = sch->driver->sch_event(sch, slow);
+ else
+ dev_dbg(&sch->dev,
+ "Got subchannel machine check but "
+ "no sch_event handler provided.\n");
+ }
+ if (ret != 0 && ret != -EAGAIN) {
+ CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ }
+ return ret;
+}
+
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ ret = css_evaluate_known_subchannel(sch, slow);
+ put_device(&sch->dev);
+ } else
+ ret = css_evaluate_new_subchannel(schid, slow);
+ if (ret == -EAGAIN)
+ css_schedule_eval(schid);
+}
+
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(css_sched_sch_todo);
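+
+/*
+ * The caller-side pattern therefore looks like this (compare the
+ * -EAGAIN retry in css_sch_todo() below):
+ *
+ *	spin_lock_irq(sch->lock);
+ *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ *	spin_unlock_irq(sch->lock);
+ */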
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
+
+static int __init slow_subchannel_init(void)
+{
+ spin_lock_init(&slow_subchannel_lock);
+ atomic_set(&css_eval_scheduled, 0);
+ init_waitqueue_head(&css_eval_wq);
+ slow_subchannel_set = idset_sch_new();
+ if (!slow_subchannel_set) {
+ CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int slow_eval_known_fn(struct subchannel *sch, void *data)
+{
+ int eval;
+ int rc;
+
+ spin_lock_irq(&slow_subchannel_lock);
+ eval = idset_sch_contains(slow_subchannel_set, sch->schid);
+ idset_sch_del(slow_subchannel_set, sch->schid);
+ spin_unlock_irq(&slow_subchannel_lock);
+ if (eval) {
+ rc = css_evaluate_known_subchannel(sch, 1);
+ if (rc == -EAGAIN)
+ css_schedule_eval(sch->schid);
+ }
+ return 0;
+}
+
+static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
+{
+ int eval;
+ int rc = 0;
+
+ spin_lock_irq(&slow_subchannel_lock);
+ eval = idset_sch_contains(slow_subchannel_set, schid);
+ idset_sch_del(slow_subchannel_set, schid);
+ spin_unlock_irq(&slow_subchannel_lock);
+ if (eval) {
+ rc = css_evaluate_new_subchannel(schid, 1);
+ switch (rc) {
+ case -EAGAIN:
+ css_schedule_eval(schid);
+ rc = 0;
+ break;
+ case -ENXIO:
+ case -ENOMEM:
+ case -EIO:
+ /* These should abort looping */
+ spin_lock_irq(&slow_subchannel_lock);
+ idset_sch_del_subseq(slow_subchannel_set, schid);
+ spin_unlock_irq(&slow_subchannel_lock);
+ break;
+ default:
+ rc = 0;
+ }
+ /* Allow scheduling here since the containing loop might
+ * take a while. */
+ cond_resched();
+ }
+ return rc;
+}
+
+static void css_slow_path_func(struct work_struct *unused)
+{
+ unsigned long flags;
+
+ CIO_TRACE_EVENT(4, "slowpath");
+ for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
+ NULL);
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ if (idset_is_empty(slow_subchannel_set)) {
+ atomic_set(&css_eval_scheduled, 0);
+ wake_up(&css_eval_wq);
+ }
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
+struct workqueue_struct *cio_work_q;
+
+void css_schedule_eval(struct subchannel_id schid)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_sch_add(slow_subchannel_set, schid);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_fill(slow_subchannel_set);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static int __unset_registered(struct device *dev, void *data)
+{
+ struct idset *set = data;
+ struct subchannel *sch = to_subchannel(dev);
+
+ idset_sch_del(set, sch->schid);
+ return 0;
+}
+
+void css_schedule_eval_all_unreg(unsigned long delay)
+{
+ unsigned long flags;
+ struct idset *unreg_set;
+
+ /* Find unregistered subchannels. */
+ unreg_set = idset_sch_new();
+ if (!unreg_set) {
+ /* Fallback. */
+ css_schedule_eval_all();
+ return;
+ }
+ idset_fill(unreg_set);
+ bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+ /* Apply to slow_subchannel_set. */
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_add_set(slow_subchannel_set, unreg_set);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, delay);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+ idset_free(unreg_set);
+}
+
+void css_wait_for_slow_path(void)
+{
+ flush_workqueue(cio_work_q);
+}
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+ /* Schedule with a delay to allow merging of subsequent calls. */
+ css_schedule_eval_all_unreg(1 * HZ);
+}
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
+/*
+ * Called from the machine check handler for subchannel report words.
+ */
+static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+ struct subchannel_id mchk_schid;
+ struct subchannel *sch;
+
+ if (overflow) {
+ css_schedule_eval_all();
+ return;
+ }
+ CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+ crw0->erc, crw0->rsid);
+ if (crw1)
+ CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
+ crw1->anc, crw1->erc, crw1->rsid);
+ init_subchannel_id(&mchk_schid);
+ mchk_schid.sch_no = crw0->rsid;
+ if (crw1)
+ mchk_schid.ssid = (crw1->rsid >> 4) & 3;
+
+ if (crw0->erc == CRW_ERC_PMOD) {
+ sch = get_subchannel_by_schid(mchk_schid);
+ if (sch) {
+ css_update_ssd_info(sch);
+ put_device(&sch->dev);
+ }
+ }
+ /*
+ * Since we are always presented with IPI in the CRW, we have to
+ * use stsch() to find out if the subchannel in question has come
+ * or gone.
+ */
+ css_evaluate_subchannel(mchk_schid, 0);
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+ struct cpuid cpu_id;
+
+ if (css_general_characteristics.mcss) {
+ css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+ css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
+ } else {
+#ifdef CONFIG_SMP
+ css->global_pgid.pgid_high.cpu_addr = stap();
+#else
+ css->global_pgid.pgid_high.cpu_addr = 0;
+#endif
+ }
+ get_cpu_id(&cpu_id);
+ css->global_pgid.cpu_id = cpu_id.ident;
+ css->global_pgid.cpu_model = cpu_id.machine;
+ css->global_pgid.tod_high = tod_high;
+}
+
+static void
+channel_subsystem_release(struct device *dev)
+{
+ struct channel_subsystem *css;
+
+ css = to_css(dev);
+ mutex_destroy(&css->mutex);
+ if (css->pseudo_subchannel) {
+ /* Implies that it has been generated but never registered. */
+ css_subchannel_release(&css->pseudo_subchannel->dev);
+ css->pseudo_subchannel = NULL;
+ }
+ kfree(css);
+}
+
+static ssize_t
+css_cm_enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_subsystem *css = to_css(dev);
+ int ret;
+
+ if (!css)
+ return 0;
+ mutex_lock(&css->mutex);
+ ret = sprintf(buf, "%x\n", css->cm_enabled);
+ mutex_unlock(&css->mutex);
+ return ret;
+}
+
+static ssize_t
+css_cm_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_subsystem *css = to_css(dev);
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+ mutex_lock(&css->mutex);
+ switch (val) {
+ case 0:
+ ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
+ break;
+ case 1:
+ ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&css->mutex);
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
+
+static int __init setup_css(int nr)
+{
+ u32 tod_high;
+ int ret;
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[nr];
+ memset(css, 0, sizeof(struct channel_subsystem));
+ css->pseudo_subchannel =
+ kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
+ if (!css->pseudo_subchannel)
+ return -ENOMEM;
+ css->pseudo_subchannel->dev.parent = &css->device;
+ css->pseudo_subchannel->dev.release = css_subchannel_release;
+ dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+ mutex_init(&css->pseudo_subchannel->reg_mutex);
+ ret = css_sch_create_locks(css->pseudo_subchannel);
+ if (ret) {
+ kfree(css->pseudo_subchannel);
+ return ret;
+ }
+ mutex_init(&css->mutex);
+ css->valid = 1;
+ css->cssid = nr;
+ dev_set_name(&css->device, "css%x", nr);
+ css->device.release = channel_subsystem_release;
+ tod_high = (u32) (get_tod_clock() >> 32);
+ css_generate_pgid(css, tod_high);
+ return 0;
+}
+
+static int css_reboot_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ int ret, i;
+
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ mutex_lock(&css->mutex);
+ if (css->cm_enabled)
+ if (chsc_secm(css, 0))
+ ret = NOTIFY_BAD;
+ mutex_unlock(&css->mutex);
+ }
+
+ return ret;
+}
+
+static struct notifier_block css_reboot_notifier = {
+ .notifier_call = css_reboot_event,
+};
+
+/*
+ * The css devices are not on a bus, do not have a class and do not have
+ * a special device type, so we cannot stop/restart channel path
+ * measurements via the normal suspend/resume callbacks; we have to use
+ * notifiers instead.
+ */
+static int css_power_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ int ret, i;
+
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ ret = __chsc_do_secm(css, 0);
+ ret = notifier_from_errno(ret);
+ mutex_unlock(&css->mutex);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ ret = NOTIFY_DONE;
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = channel_subsystems[i];
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ ret = __chsc_do_secm(css, 1);
+ ret = notifier_from_errno(ret);
+ mutex_unlock(&css->mutex);
+ }
+		/* search for subchannels that appeared during hibernation */
+ css_schedule_reprobe();
+ break;
+ default:
+ ret = NOTIFY_DONE;
+ }
+ return ret;
+}
+
+static struct notifier_block css_power_notifier = {
+ .notifier_call = css_power_event,
+};
+
+/*
+ * Now that the driver core is running, we can set up our channel subsystem.
+ * Instances of struct subchannel are created during probing.
+ */
+static int __init css_bus_init(void)
+{
+ int ret, i;
+
+ ret = chsc_init();
+ if (ret)
+ return ret;
+
+ chsc_determine_css_characteristics();
+ /* Try to enable MSS. */
+ ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+ if (ret)
+ max_ssid = 0;
+ else /* Success. */
+ max_ssid = __MAX_SSID;
+
+ ret = slow_subchannel_init();
+ if (ret)
+ goto out;
+
+ ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
+ if (ret)
+ goto out;
+
+	ret = bus_register(&css_bus_type);
+	if (ret)
+ goto out;
+
+ /* Setup css structure. */
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ struct channel_subsystem *css;
+
+ css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+ if (!css) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+ channel_subsystems[i] = css;
+ ret = setup_css(i);
+ if (ret) {
+ kfree(channel_subsystems[i]);
+ goto out_unregister;
+ }
+ ret = device_register(&css->device);
+ if (ret) {
+ put_device(&css->device);
+ goto out_unregister;
+ }
+ if (css_chsc_characteristics.secm) {
+ ret = device_create_file(&css->device,
+ &dev_attr_cm_enable);
+ if (ret)
+ goto out_device;
+ }
+ ret = device_register(&css->pseudo_subchannel->dev);
+ if (ret) {
+ put_device(&css->pseudo_subchannel->dev);
+ goto out_file;
+ }
+ }
+ ret = register_reboot_notifier(&css_reboot_notifier);
+ if (ret)
+ goto out_unregister;
+ ret = register_pm_notifier(&css_power_notifier);
+ if (ret) {
+ unregister_reboot_notifier(&css_reboot_notifier);
+ goto out_unregister;
+ }
+ css_init_done = 1;
+
+ /* Enable default isc for I/O subchannels. */
+ isc_register(IO_SCH_ISC);
+
+ return 0;
+out_file:
+ if (css_chsc_characteristics.secm)
+ device_remove_file(&channel_subsystems[i]->device,
+ &dev_attr_cm_enable);
+out_device:
+ device_unregister(&channel_subsystems[i]->device);
+out_unregister:
+ while (i > 0) {
+ struct channel_subsystem *css;
+
+ i--;
+ css = channel_subsystems[i];
+ device_unregister(&css->pseudo_subchannel->dev);
+ css->pseudo_subchannel = NULL;
+ if (css_chsc_characteristics.secm)
+ device_remove_file(&css->device,
+ &dev_attr_cm_enable);
+ device_unregister(&css->device);
+ }
+ bus_unregister(&css_bus_type);
+out:
+ crw_unregister_handler(CRW_RSC_SCH);
+ idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
+ pr_alert("The CSS device driver initialization failed with "
+ "errno=%d\n", ret);
+ return ret;
+}
+
+static void __init css_bus_cleanup(void)
+{
+ struct channel_subsystem *css;
+ int i;
+
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ css = channel_subsystems[i];
+ device_unregister(&css->pseudo_subchannel->dev);
+ css->pseudo_subchannel = NULL;
+ if (css_chsc_characteristics.secm)
+ device_remove_file(&css->device, &dev_attr_cm_enable);
+ device_unregister(&css->device);
+ }
+ bus_unregister(&css_bus_type);
+ crw_unregister_handler(CRW_RSC_SCH);
+ idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
+ isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+ int ret;
+
+ ret = css_bus_init();
+ if (ret)
+ return ret;
+ cio_work_q = create_singlethread_workqueue("cio");
+ if (!cio_work_q) {
+ ret = -ENOMEM;
+ goto out_bus;
+ }
+ ret = io_subchannel_init();
+ if (ret)
+ goto out_wq;
+
+ return ret;
+out_wq:
+ destroy_workqueue(cio_work_q);
+out_bus:
+ css_bus_cleanup();
+ return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+ struct css_driver *cssdrv = to_cssdriver(drv);
+
+ if (cssdrv->settle)
+ return cssdrv->settle();
+ return 0;
+}
+
+int css_complete_work(void)
+{
+ int ret;
+
+ /* Wait for the evaluation of subchannels to finish. */
+ ret = wait_event_interruptible(css_eval_wq,
+ atomic_read(&css_eval_scheduled) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ /* Wait for the subchannel type specific initialization to finish */
+ return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup before the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
+ /* Start initial subchannel evaluation. */
+ css_schedule_eval_all();
+ css_complete_work();
+ return 0;
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
+void channel_subsystem_reinit(void)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+
+ chsc_enable_facility(CHSC_SDA_OC_MSS);
+ chp_id_for_each(&chpid) {
+ chp = chpid_to_chp(chpid);
+ if (chp)
+ chp_update_desc(chp);
+ }
+}
+
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ /* Handle pending CRW's. */
+ crw_wait_for_channel_report();
+ ret = css_complete_work();
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+ .open = nonseekable_open,
+ .write = cio_settle_write,
+ .llseek = no_llseek,
+};
+
+static int __init cio_settle_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("cio_settle", S_IWUSR, NULL,
+ &cio_settle_proc_fops);
+ if (!entry)
+ return -ENOMEM;
+ return 0;
+}
+device_initcall(cio_settle_init);
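+
+/*
+ * Userspace can wait for the channel subsystem to become idle by
+ * writing to this file, e.g. (assuming procfs is mounted at /proc):
+ *
+ *	echo 1 > /proc/cio_settle
+ *
+ * The write returns only after all pending channel reports and
+ * subchannel evaluations have been processed.
+ */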
+#endif /* CONFIG_PROC_FS */
+
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+ return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
+static int css_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *driver = to_cssdriver(drv);
+ struct css_device_id *id;
+
+ for (id = driver->subchannel_type; id->match_flags; id++) {
+ if (sch->st == id->type)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int css_probe(struct device *dev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(dev);
+ sch->driver = to_cssdriver(dev->driver);
+ ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
+ if (ret)
+ sch->driver = NULL;
+ return ret;
+}
+
+static int css_remove(struct device *dev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(dev);
+ ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
+ sch->driver = NULL;
+ return ret;
+}
+
+static void css_shutdown(struct device *dev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(dev);
+ if (sch->driver && sch->driver->shutdown)
+ sch->driver->shutdown(sch);
+}
+
+static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int ret;
+
+ ret = add_uevent_var(env, "ST=%01X", sch->st);
+ if (ret)
+ return ret;
+ ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
+ return ret;
+}
+
+static int css_pm_prepare(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (mutex_is_locked(&sch->reg_mutex))
+ return -EAGAIN;
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ /* Notify drivers that they may not register children. */
+ return drv->prepare ? drv->prepare(sch) : 0;
+}
+
+static void css_pm_complete(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return;
+ drv = to_cssdriver(sch->dev.driver);
+ if (drv->complete)
+ drv->complete(sch);
+}
+
+static int css_pm_freeze(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->freeze ? drv->freeze(sch) : 0;
+}
+
+static int css_pm_thaw(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->thaw ? drv->thaw(sch) : 0;
+}
+
+static int css_pm_restore(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ css_update_ssd_info(sch);
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->restore ? drv->restore(sch) : 0;
+}
+
+static const struct dev_pm_ops css_pm_ops = {
+ .prepare = css_pm_prepare,
+ .complete = css_pm_complete,
+ .freeze = css_pm_freeze,
+ .thaw = css_pm_thaw,
+ .restore = css_pm_restore,
+};
+
+static struct bus_type css_bus_type = {
+ .name = "css",
+ .match = css_bus_match,
+ .probe = css_probe,
+ .remove = css_remove,
+ .shutdown = css_shutdown,
+ .uevent = css_uevent,
+ .pm = &css_pm_ops,
+};
+
+/**
+ * css_driver_register - register a css driver
+ * @cdrv: css driver to register
+ *
+ * This is mainly a wrapper around driver_register that sets bus_type
+ * in the embedded struct device_driver correctly.
+ */
+int css_driver_register(struct css_driver *cdrv)
+{
+ cdrv->drv.bus = &css_bus_type;
+ return driver_register(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_register);
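+
+/*
+ * A minimal registration might look like this (a sketch; the names and
+ * the match_flags value are illustrative only):
+ *
+ *	static struct css_device_id foo_ids[] = {
+ *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+ *		{ },	// list terminator, see css_bus_match()
+ *	};
+ *
+ *	static struct css_driver foo_driver = {
+ *		.subchannel_type = foo_ids,
+ *		.drv = { .name = "foo", },
+ *	};
+ *
+ *	css_driver_register(&foo_driver);
+ */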
+
+/**
+ * css_driver_unregister - unregister a css driver
+ * @cdrv: css driver to unregister
+ *
+ * This is a wrapper around driver_unregister.
+ */
+void css_driver_unregister(struct css_driver *cdrv)
+{
+ driver_unregister(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_unregister);
+
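+/*
+ * Illustrative use of the two helpers above; all "foo" names are made
+ * up, and the id table is terminated by an empty entry (see
+ * io_subchannel_driver in device.c for a real user):
+ *
+ *	static struct css_device_id foo_ids[] = {
+ *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+ *		{ },
+ *	};
+ *
+ *	static struct css_driver foo_driver = {
+ *		.drv		 = { .owner = THIS_MODULE, .name = "foo" },
+ *		.subchannel_type = foo_ids,
+ *		.probe		 = foo_probe,
+ *	};
+ *
+ *	ret = css_driver_register(&foo_driver);
+ *	...
+ *	css_driver_unregister(&foo_driver);
+ */
+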
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/s390/cio/css.h b/kernel/drivers/s390/cio/css.h
new file mode 100644
index 000000000..2c9107e20
--- /dev/null
+++ b/kernel/drivers/s390/cio/css.h
@@ -0,0 +1,146 @@
+#ifndef _CSS_H
+#define _CSS_H
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/schid.h>
+
+#include "cio.h"
+
+/*
+ * path grouping stuff
+ */
+#define SPID_FUNC_SINGLE_PATH 0x00
+#define SPID_FUNC_MULTI_PATH 0x80
+#define SPID_FUNC_ESTABLISH 0x00
+#define SPID_FUNC_RESIGN 0x40
+#define SPID_FUNC_DISBAND 0x20
+
+#define SNID_STATE1_RESET 0
+#define SNID_STATE1_UNGROUPED 2
+#define SNID_STATE1_GROUPED 3
+
+#define SNID_STATE2_NOT_RESVD 0
+#define SNID_STATE2_RESVD_ELSE 2
+#define SNID_STATE2_RESVD_SELF 3
+
+#define SNID_STATE3_MULTI_PATH 1
+#define SNID_STATE3_SINGLE_PATH 0
+
+struct path_state {
+ __u8 state1 : 2; /* path state value 1 */
+ __u8 state2 : 2; /* path state value 2 */
+ __u8 state3 : 1; /* path state value 3 */
+ __u8 resvd : 3; /* reserved */
+} __attribute__ ((packed));
+
+struct extended_cssid {
+ u8 version;
+ u8 cssid;
+} __attribute__ ((packed));
+
+struct pgid {
+ union {
+ __u8 fc; /* SPID function code */
+ struct path_state ps; /* SNID path state */
+ } __attribute__ ((packed)) inf;
+ union {
+ __u32 cpu_addr : 16; /* CPU address */
+ struct extended_cssid ext_cssid;
+ } __attribute__ ((packed)) pgid_high;
+ __u32 cpu_id : 24; /* CPU identification */
+ __u32 cpu_model : 16; /* CPU model */
+ __u32 tod_high; /* high word TOD clock */
+} __attribute__ ((packed));
+
+struct subchannel;
+struct chp_link;
+/**
+ * struct css_driver - device driver for subchannels
+ * @subchannel_type: subchannel type supported by this driver
+ * @drv: embedded device driver structure
+ * @irq: called on interrupts
+ * @chp_event: called for events affecting a channel path
+ * @sch_event: called for events affecting the subchannel
+ * @probe: function called on probe
+ * @remove: function called on remove
+ * @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
+ */
+struct css_driver {
+ struct css_device_id *subchannel_type;
+ struct device_driver drv;
+ void (*irq)(struct subchannel *);
+ int (*chp_event)(struct subchannel *, struct chp_link *, int);
+ int (*sch_event)(struct subchannel *, int);
+ int (*probe)(struct subchannel *);
+ int (*remove)(struct subchannel *);
+ void (*shutdown)(struct subchannel *);
+ int (*prepare) (struct subchannel *);
+ void (*complete) (struct subchannel *);
+ int (*freeze)(struct subchannel *);
+ int (*thaw) (struct subchannel *);
+ int (*restore)(struct subchannel *);
+ int (*settle)(void);
+};
+
+#define to_cssdriver(n) container_of(n, struct css_driver, drv)
+
+extern int css_driver_register(struct css_driver *);
+extern void css_driver_unregister(struct css_driver *);
+
+extern void css_sch_device_unregister(struct subchannel *);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
+extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
+extern int css_init_done;
+extern int max_ssid;
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+ int (*fn_unknown)(struct subchannel_id,
+ void *), void *data);
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+void css_update_ssd_info(struct subchannel *sch);
+
+struct channel_subsystem {
+ u8 cssid;
+ int valid;
+ struct channel_path *chps[__MAX_CHPID + 1];
+ struct device device;
+ struct pgid global_pgid;
+ struct mutex mutex;
+ /* channel measurement related */
+ int cm_enabled;
+ void *cub_addr1;
+ void *cub_addr2;
+ /* for orphaned ccw devices */
+ struct subchannel *pseudo_subchannel;
+};
+#define to_css(dev) container_of(dev, struct channel_subsystem, device)
+
+extern struct channel_subsystem *channel_subsystems[];
+
+/* Helper functions to build lists for the slow path. */
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
+int css_complete_work(void);
+
+int sch_is_pseudo_sch(struct subchannel *);
+struct schib;
+int css_sch_is_valid(struct schib *);
+
+extern struct workqueue_struct *cio_work_q;
+void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
+#endif
diff --git a/kernel/drivers/s390/cio/device.c b/kernel/drivers/s390/cio/device.c
new file mode 100644
index 000000000..dfef5e63c
--- /dev/null
+++ b/kernel/drivers/s390/cio/device.c
@@ -0,0 +1,2161 @@
+/*
+ * bus driver for ccw devices
+ *
+ * Copyright IBM Corp. 2002, 2008
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/param.h> /* HZ */
+#include <asm/cmb.h>
+#include <asm/isc.h>
+
+#include "chp.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "chsc.h"
+
+static struct timer_list recovery_timer;
+static DEFINE_SPINLOCK(recovery_lock);
+static int recovery_phase;
+static const unsigned long recovery_delay[] = { 3, 30, 300 };
+
+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
+/******************* bus type handling ***********************/
+
+/* The Linux driver model distinguishes between a bus type and
+ * the bus itself. Of course we only have one channel
+ * subsystem driver and one channel subsystem per machine, but
+ * we still use the abstraction. T.R. says it's a good idea. */
+static int
+ccw_bus_match (struct device * dev, struct device_driver * drv)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = to_ccwdrv(drv);
+ const struct ccw_device_id *ids = cdrv->ids, *found;
+
+ if (!ids)
+ return 0;
+
+ found = ccw_device_id_match(ids, &cdev->id);
+ if (!found)
+ return 0;
+
+ cdev->id.driver_info = found->driver_info;
+
+ return 1;
+}
+
+/* Store the modalias string, delimited by the given suffix, into a buffer of
+ * the specified size. Return the length of the resulting string (excluding the
+ * trailing '\0') even if the string doesn't fit the buffer (snprintf
+ * semantics). */
+static int snprint_alias(char *buf, size_t size,
+ struct ccw_device_id *id, const char *suffix)
+{
+ int len;
+
+ len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
+ if (len > size)
+ return len;
+ buf += len;
+ size -= len;
+
+ if (id->dev_type != 0)
+ len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
+ id->dev_model, suffix);
+ else
+ len += snprintf(buf, size, "dtdm%s", suffix);
+
+ return len;
+}
+
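+/*
+ * For example (made-up IDs): cu_type 0x3990/cu_model 0xe9 together with
+ * dev_type 0x3390/dev_model 0x0c yields "ccw:t3990mE9dt3390dm0C", while
+ * a zero dev_type yields "ccw:t3990mE9dtdm".
+ */
+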
+/* Set up environment variables for ccw device uevent. Return 0 on success,
+ * non-zero otherwise. */
+static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+ int ret;
+ char modalias_buf[30];
+
+ /* CU_TYPE= */
+ ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
+ if (ret)
+ return ret;
+
+ /* CU_MODEL= */
+ ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
+ if (ret)
+ return ret;
+
+	/* The next two can be zero; that's OK for us. */
+ /* DEV_TYPE= */
+ ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
+ if (ret)
+ return ret;
+
+ /* DEV_MODEL= */
+ ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
+ if (ret)
+ return ret;
+
+ /* MODALIAS= */
+ snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
+ ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
+ return ret;
+}
+
+static void io_subchannel_irq(struct subchannel *);
+static int io_subchannel_probe(struct subchannel *);
+static int io_subchannel_remove(struct subchannel *);
+static void io_subchannel_shutdown(struct subchannel *);
+static int io_subchannel_sch_event(struct subchannel *, int);
+static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
+ int);
+static void recovery_func(unsigned long data);
+
+static struct css_device_id io_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, io_subchannel_ids);
+
+static int io_subchannel_prepare(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ /*
+ * Don't allow suspend while a ccw device registration
+ * is still outstanding.
+ */
+ cdev = sch_get_cdev(sch);
+ if (cdev && !device_is_registered(&cdev->dev))
+ return -EAGAIN;
+ return 0;
+}
+
+static int io_subchannel_settle(void)
+{
+ int ret;
+
+ ret = wait_event_interruptible(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ return 0;
+}
+
+static struct css_driver io_subchannel_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "io_subchannel",
+ },
+ .subchannel_type = io_subchannel_ids,
+ .irq = io_subchannel_irq,
+ .sch_event = io_subchannel_sch_event,
+ .chp_event = io_subchannel_chp_event,
+ .probe = io_subchannel_probe,
+ .remove = io_subchannel_remove,
+ .shutdown = io_subchannel_shutdown,
+ .prepare = io_subchannel_prepare,
+ .settle = io_subchannel_settle,
+};
+
+int __init io_subchannel_init(void)
+{
+ int ret;
+
+ setup_timer(&recovery_timer, recovery_func, 0);
+ ret = bus_register(&ccw_bus_type);
+ if (ret)
+ return ret;
+ ret = css_driver_register(&io_subchannel_driver);
+ if (ret)
+ bus_unregister(&ccw_bus_type);
+
+ return ret;
+}
+
+
+/************************ device handling **************************/
+
+/*
+ * A ccw_device has some interfaces in sysfs in addition to the
+ * standard ones.
+ * The following entries are designed to export the information which
+ * resided in 2.4 in /proc/subchannels. Subchannel and device number
+ * are obvious, so they don't have an entry :)
+ * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
+ */
+static ssize_t
+chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct chsc_ssd_info *ssd = &sch->ssd_info;
+ ssize_t ret = 0;
+ int chp;
+ int mask;
+
+ for (chp = 0; chp < 8; chp++) {
+ mask = 0x80 >> chp;
+ if (ssd->path_mask & mask)
+ ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+ else
+ ret += sprintf(buf + ret, "00 ");
+ }
+ ret += sprintf (buf+ret, "\n");
+ return min((ssize_t)PAGE_SIZE, ret);
+}
+
+static ssize_t
+pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct pmcw *pmcw = &sch->schib.pmcw;
+
+ return sprintf (buf, "%02x %02x %02x\n",
+ pmcw->pim, pmcw->pam, pmcw->pom);
+}
+
+static ssize_t
+devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+
+ if (id->dev_type != 0)
+ return sprintf(buf, "%04x/%02x\n",
+ id->dev_type, id->dev_model);
+ else
+ return sprintf(buf, "n/a\n");
+}
+
+static ssize_t
+cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+
+ return sprintf(buf, "%04x/%02x\n",
+ id->cu_type, id->cu_model);
+}
+
+static ssize_t
+modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+ int len;
+
+ len = snprint_alias(buf, PAGE_SIZE, id, "\n");
+
+ return len > PAGE_SIZE ? PAGE_SIZE : len;
+}
+
+static ssize_t
+online_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ return sprintf(buf, cdev->online ? "1\n" : "0\n");
+}
+
+int ccw_device_is_orphan(struct ccw_device *cdev)
+{
+ return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
+}
+
+static void ccw_device_unregister(struct ccw_device *cdev)
+{
+ if (device_is_registered(&cdev->dev)) {
+ /* Undo device_add(). */
+ device_del(&cdev->dev);
+ }
+ if (cdev->private->flags.initialized) {
+ cdev->private->flags.initialized = 0;
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ }
+}
+
+static void io_subchannel_quiesce(struct subchannel *);
+
+/**
+ * ccw_device_set_offline() - disable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function calls the driver's set_offline() function for @cdev, if
+ * given, and then disables @cdev.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_offline(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret, state;
+
+ if (!cdev)
+ return -ENODEV;
+ if (!cdev->online || !cdev->drv)
+ return -EINVAL;
+
+ if (cdev->drv->set_offline) {
+ ret = cdev->drv->set_offline(cdev);
+ if (ret != 0)
+ return ret;
+ }
+ spin_lock_irq(cdev->ccwlock);
+ sch = to_subchannel(cdev->dev.parent);
+ cdev->online = 0;
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
+ }
+ do {
+ ret = ccw_device_offline(cdev);
+ if (!ret)
+ break;
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+ "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret != -EBUSY)
+ goto error;
+ state = cdev->private->state;
+ spin_unlock_irq(cdev->ccwlock);
+ io_subchannel_quiesce(sch);
+ spin_lock_irq(cdev->ccwlock);
+ cdev->private->state = state;
+ } while (ret == -EBUSY);
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Inform the user if set offline failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: The device entered boxed state while "
+ "being set offline\n", dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: The device stopped operating while "
+ "being set offline\n", dev_name(&cdev->dev));
+ }
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return 0;
+
+error:
+ cdev->private->state = DEV_STATE_OFFLINE;
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return -ENODEV;
+}
+
+/**
+ * ccw_device_set_online() - enable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function first enables @cdev and then calls the driver's set_online()
+ * function for @cdev, if given. If set_online() returns an error, @cdev is
+ * disabled again.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_online(struct ccw_device *cdev)
+{
+ int ret;
+ int ret2;
+
+ if (!cdev)
+ return -ENODEV;
+ if (cdev->online || !cdev->drv)
+ return -EINVAL;
+ /* Hold on to an extra reference while device is online. */
+ if (!get_device(&cdev->dev))
+ return -ENODEV;
+
+ spin_lock_irq(cdev->ccwlock);
+ ret = ccw_device_online(cdev);
+ spin_unlock_irq(cdev->ccwlock);
+ if (ret == 0)
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ else {
+ CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+ }
+ spin_lock_irq(cdev->ccwlock);
+ /* Check if online processing was successful */
+ if ((cdev->private->state != DEV_STATE_ONLINE) &&
+ (cdev->private->state != DEV_STATE_W4SENSE)) {
+ spin_unlock_irq(cdev->ccwlock);
+ /* Inform the user that set online failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is boxed\n",
+ dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warning("%s: Setting the device online failed "
+ "because it is not operational\n",
+ dev_name(&cdev->dev));
+ }
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return -ENODEV;
+ }
+ spin_unlock_irq(cdev->ccwlock);
+ if (cdev->drv->set_online)
+ ret = cdev->drv->set_online(cdev);
+ if (ret)
+ goto rollback;
+
+ spin_lock_irq(cdev->ccwlock);
+ cdev->online = 1;
+ spin_unlock_irq(cdev->ccwlock);
+ return 0;
+
+rollback:
+ spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
+ }
+ ret2 = ccw_device_offline(cdev);
+ if (ret2)
+ goto error;
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+
+error:
+ CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret2, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ cdev->private->state = DEV_STATE_OFFLINE;
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+}
+
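+/*
+ * Illustrative pairing of the two calls above from a driver's point of
+ * view:
+ *
+ *	ret = ccw_device_set_online(cdev);	enables cdev, then calls
+ *						cdev->drv->set_online()
+ *	...
+ *	ret = ccw_device_set_offline(cdev);	calls cdev->drv->set_offline(),
+ *						then disables cdev
+ */
+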
+static int online_store_handle_offline(struct ccw_device *cdev)
+{
+ if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+ spin_unlock_irq(cdev->ccwlock);
+ return 0;
+ }
+ if (cdev->drv && cdev->drv->set_offline)
+ return ccw_device_set_offline(cdev);
+ return -EINVAL;
+}
+
+static int online_store_recog_and_online(struct ccw_device *cdev)
+{
+ /* Do device recognition, if needed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q,
+ cdev->private->flags.recog_done);
+ if (cdev->private->state != DEV_STATE_OFFLINE)
+ /* recognition failed */
+ return -EAGAIN;
+ }
+ if (cdev->drv && cdev->drv->set_online)
+ return ccw_device_set_online(cdev);
+ return -EINVAL;
+}
+
+static int online_store_handle_online(struct ccw_device *cdev, int force)
+{
+ int ret;
+
+ ret = online_store_recog_and_online(cdev);
+ if (ret && !force)
+ return ret;
+ if (force && cdev->private->state == DEV_STATE_BOXED) {
+ ret = ccw_device_stlck(cdev);
+ if (ret)
+ return ret;
+ if (cdev->id.cu_type == 0)
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ ret = online_store_recog_and_online(cdev);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static ssize_t online_store (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ int force, ret;
+ unsigned long i;
+
+ /* Prevent conflict between multiple on-/offline processing requests. */
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ /* Prevent conflict between internal I/Os and on-/offline processing. */
+ if (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ /* Prevent conflict between pending work and on-/offline processing.*/
+ if (work_pending(&cdev->private->todo_work)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (!strncmp(buf, "force\n", count)) {
+ force = 1;
+ i = 1;
+ ret = 0;
+ } else {
+ force = 0;
+ ret = kstrtoul(buf, 16, &i);
+ }
+ if (ret)
+ goto out;
+
+ device_lock(dev);
+ switch (i) {
+ case 0:
+ ret = online_store_handle_offline(cdev);
+ break;
+ case 1:
+ ret = online_store_handle_online(cdev, force);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ device_unlock(dev);
+
+out:
+ atomic_set(&cdev->private->onoff, 0);
+ return (ret < 0) ? ret : count;
+}
+
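+/*
+ * From userspace this attribute is typically driven like so (with a
+ * made-up bus id):
+ *
+ *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
+ *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
+ *	echo force > /sys/bus/ccw/devices/0.0.1234/online
+ *
+ * "force" additionally steals the device lock of a boxed device before
+ * retrying to set it online.
+ */
+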
+static ssize_t
+available_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
+
+ if (ccw_device_is_orphan(cdev))
+ return sprintf(buf, "no device\n");
+ switch (cdev->private->state) {
+ case DEV_STATE_BOXED:
+ return sprintf(buf, "boxed\n");
+ case DEV_STATE_DISCONNECTED:
+ case DEV_STATE_DISCONNECTED_SENSE_ID:
+ case DEV_STATE_NOT_OPER:
+ sch = to_subchannel(dev->parent);
+ if (!sch->lpm)
+ return sprintf(buf, "no path\n");
+ else
+ return sprintf(buf, "no device\n");
+ default:
+ /* All other states considered fine. */
+ return sprintf(buf, "good\n");
+ }
+}
+
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int rc;
+
+ rc = chsc_siosl(sch->schid);
+ if (rc < 0) {
+ pr_warning("Logging for subchannel 0.%x.%04x failed with "
+ "errno=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ return rc;
+ }
+ pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return count;
+}
+
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%02x\n", sch->vpm);
+}
+
+static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
+static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
+static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
+static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
+static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
+static DEVICE_ATTR(online, 0644, online_show, online_store);
+static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
+
+static struct attribute *io_subchannel_attrs[] = {
+ &dev_attr_chpids.attr,
+ &dev_attr_pimpampom.attr,
+ &dev_attr_logging.attr,
+ &dev_attr_vpm.attr,
+ NULL,
+};
+
+static struct attribute_group io_subchannel_attr_group = {
+ .attrs = io_subchannel_attrs,
+};
+
+static struct attribute * ccwdev_attrs[] = {
+ &dev_attr_devtype.attr,
+ &dev_attr_cutype.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_online.attr,
+ &dev_attr_cmb_enable.attr,
+ &dev_attr_availability.attr,
+ NULL,
+};
+
+static struct attribute_group ccwdev_attr_group = {
+ .attrs = ccwdev_attrs,
+};
+
+static const struct attribute_group *ccwdev_attr_groups[] = {
+ &ccwdev_attr_group,
+ NULL,
+};
+
+static int ccw_device_add(struct ccw_device *cdev)
+{
+ struct device *dev = &cdev->dev;
+
+ dev->bus = &ccw_bus_type;
+ return device_add(dev);
+}
+
+static int match_dev_id(struct device *dev, void *data)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_dev_id *dev_id = data;
+
+ return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
+}
+
+/**
+ * get_ccwdev_by_dev_id() - obtain device from a ccw device id
+ * @dev_id: id of the device to be searched
+ *
+ * This function searches all devices attached to the ccw bus for a device
+ * matching @dev_id.
+ * Returns:
+ * If a device is found, its reference count is increased and the device is
+ * returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
+
+ return dev ? to_ccwdev(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
+
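+/*
+ * Typical lookup (illustrative; the devno is made up). The caller owns
+ * the returned reference and must drop it when done:
+ *
+ *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
+ *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
+ *
+ *	if (cdev) {
+ *		...
+ *		put_device(&cdev->dev);
+ *	}
+ */
+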
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
+{
+ int ret;
+
+ if (device_is_registered(&cdev->dev)) {
+ device_release_driver(&cdev->dev);
+ ret = device_attach(&cdev->dev);
+ WARN_ON(ret == -ENODEV);
+ }
+}
+
+static void
+ccw_device_release(struct device *dev)
+{
+ struct ccw_device *cdev;
+
+ cdev = to_ccwdev(dev);
+ /* Release reference of parent subchannel. */
+ put_device(cdev->dev.parent);
+ kfree(cdev->private);
+ kfree(cdev);
+}
+
+static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (cdev) {
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (cdev->private)
+ return cdev;
+ }
+ kfree(cdev);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void ccw_device_todo(struct work_struct *work);
+
+static int io_subchannel_initialize_dev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct ccw_device_private *priv = cdev->private;
+ int ret;
+
+ priv->cdev = cdev;
+ priv->int_class = IRQIO_CIO;
+ priv->state = DEV_STATE_NOT_OPER;
+ priv->dev_id.devno = sch->schib.pmcw.dev;
+ priv->dev_id.ssid = sch->schid.ssid;
+ priv->schid = sch->schid;
+
+ INIT_WORK(&priv->todo_work, ccw_device_todo);
+ INIT_LIST_HEAD(&priv->cmb_list);
+ init_waitqueue_head(&priv->wait_q);
+ init_timer(&priv->timer);
+
+ atomic_set(&priv->onoff, 0);
+ cdev->ccwlock = sch->lock;
+ cdev->dev.parent = &sch->dev;
+ cdev->dev.release = ccw_device_release;
+ cdev->dev.groups = ccwdev_attr_groups;
+ /* Do first half of device_register. */
+ device_initialize(&cdev->dev);
+ ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret)
+ goto out_put;
+ if (!get_device(&sch->dev)) {
+ ret = -ENODEV;
+ goto out_put;
+ }
+ priv->flags.initialized = 1;
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
+ return 0;
+
+out_put:
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ return ret;
+}
+
+static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ int ret;
+
+ cdev = io_subchannel_allocate_dev(sch);
+ if (!IS_ERR(cdev)) {
+ ret = io_subchannel_initialize_dev(sch, cdev);
+ if (ret)
+ cdev = ERR_PTR(ret);
+ }
+ return cdev;
+}
+
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
+
+static void sch_create_and_recog_new_device(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ /* Need to allocate a new ccw device. */
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ /* OK, we did everything we could... */
+ css_sch_device_unregister(sch);
+ return;
+ }
+ /* Start recognition for the new ccw device. */
+ io_subchannel_recog(cdev, sch);
+}
+
+/*
+ * Register recognized device.
+ */
+static void io_subchannel_register(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret, adjust_init_count = 1;
+ unsigned long flags;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /*
+ * Check if subchannel is still registered. It may have become
+ * unregistered if a machine check hit us after finishing
+ * device recognition but before the register work could be
+ * queued.
+ */
+ if (!device_is_registered(&sch->dev))
+ goto out_err;
+ css_update_ssd_info(sch);
+ /*
+ * io_subchannel_register() will also be called after device
+ * recognition has been done for a boxed device (which will already
+ * be registered). We need to reprobe since we may now have sense id
+ * information.
+ */
+ if (device_is_registered(&cdev->dev)) {
+ if (!cdev->drv) {
+ ret = device_reprobe(&cdev->dev);
+ if (ret)
+ /* We can't do much here. */
+ CIO_MSG_EVENT(0, "device_reprobe() returned"
+ " %d for 0.%x.%04x\n", ret,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ }
+ adjust_init_count = 0;
+ goto out;
+ }
+ /*
+ * Now we know this subchannel will stay, we can throw
+ * our delayed uevent.
+ */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ /* make it known to the system */
+ ret = ccw_device_add(cdev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ spin_lock_irqsave(sch->lock, flags);
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Release initial device reference. */
+ put_device(&cdev->dev);
+ goto out_err;
+ }
+out:
+ cdev->private->flags.recog_done = 1;
+ wake_up(&cdev->private->wait_q);
+out_err:
+ if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+}
+
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ /* Get subchannel reference for local processing. */
+ if (!get_device(cdev->dev.parent))
+ return;
+ sch = to_subchannel(cdev->dev.parent);
+ css_sch_device_unregister(sch);
+ /* Release subchannel reference for local processing. */
+ put_device(&sch->dev);
+}
+
+/*
+ * subchannel recognition done. Called from the state machine.
+ */
+void
+io_subchannel_recog_done(struct ccw_device *cdev)
+{
+ if (css_init_done == 0) {
+ cdev->private->flags.recog_done = 1;
+ return;
+ }
+ switch (cdev->private->state) {
+ case DEV_STATE_BOXED:
+ /* Device did not respond in time. */
+ case DEV_STATE_NOT_OPER:
+ cdev->private->flags.recog_done = 1;
+ /* Remove device found not operational. */
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+ break;
+ case DEV_STATE_OFFLINE:
+ /*
+ * We can't register the device in interrupt context so
+ * we schedule a work item.
+ */
+ ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
+ break;
+ }
+}
+
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+{
+ /* Increase counter of devices currently in recognition. */
+ atomic_inc(&ccw_device_init_count);
+
+ /* Start async. device sensing. */
+ spin_lock_irq(sch->lock);
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(sch->lock);
+}
+
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+ struct subchannel *sch)
+{
+ struct subchannel *old_sch;
+ int rc, old_enabled = 0;
+
+ old_sch = to_subchannel(cdev->dev.parent);
+ /* Obtain child reference for new parent. */
+ if (!get_device(&sch->dev))
+ return -ENODEV;
+
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ old_enabled = old_sch->schib.pmcw.ena;
+ rc = 0;
+ if (old_enabled)
+ rc = cio_disable_subchannel(old_sch);
+ spin_unlock_irq(old_sch->lock);
+ if (rc == -EBUSY) {
+ /* Release child reference for new parent. */
+ put_device(&sch->dev);
+ return rc;
+ }
+ }
+
+ mutex_lock(&sch->reg_mutex);
+ rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
+ mutex_unlock(&sch->reg_mutex);
+ if (rc) {
+ CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schib.pmcw.dev, rc);
+ if (old_enabled) {
+ /* Try to reenable the old subchannel. */
+ spin_lock_irq(old_sch->lock);
+ cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+ spin_unlock_irq(old_sch->lock);
+ }
+ /* Release child reference for new parent. */
+ put_device(&sch->dev);
+ return rc;
+ }
+ /* Clean up old subchannel. */
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ sch_set_cdev(old_sch, NULL);
+ spin_unlock_irq(old_sch->lock);
+ css_schedule_eval(old_sch->schid);
+ }
+ /* Release child reference for old parent. */
+ put_device(&old_sch->dev);
+ /* Initialize new subchannel. */
+ spin_lock_irq(sch->lock);
+ cdev->private->schid = sch->schid;
+ cdev->ccwlock = sch->lock;
+ if (!sch_is_pseudo_sch(sch))
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
+ if (!sch_is_pseudo_sch(sch))
+ css_update_ssd_info(sch);
+ return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_subsystem *css = to_css(sch->dev.parent);
+
+ return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
+}
+
+static void io_subchannel_irq(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+
+ CIO_TRACE_EVENT(6, "IRQ");
+ CIO_TRACE_EVENT(6, dev_name(&sch->dev));
+ if (cdev)
+ dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+ else
+ inc_irq_stat(IRQIO_CIO);
+}
+
+void io_subchannel_init_config(struct subchannel *sch)
+{
+ memset(&sch->config, 0, sizeof(sch->config));
+ sch->config.csense = 1;
+}
+
+static void io_subchannel_init_fields(struct subchannel *sch)
+{
+ if (cio_is_console(sch->schid))
+ sch->opm = 0xff;
+ else
+ sch->opm = chp_get_sch_opm(sch);
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
+ sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
+
+ CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
+ " - PIM = %02X, PAM = %02X, POM = %02X\n",
+ sch->schib.pmcw.dev, sch->schid.ssid,
+ sch->schid.sch_no, sch->schib.pmcw.pim,
+ sch->schib.pmcw.pam, sch->schib.pmcw.pom);
+
+ io_subchannel_init_config(sch);
+}
+
+/*
+ * Note: We always return 0 so that we bind to the device even on error.
+ * This is needed so that our remove function is called on unregister.
+ */
+static int io_subchannel_probe(struct subchannel *sch)
+{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
+ int rc;
+
+ if (cio_is_console(sch->schid)) {
+ rc = sysfs_create_group(&sch->dev.kobj,
+ &io_subchannel_attr_group);
+ if (rc)
+ CIO_MSG_EVENT(0, "Failed to create io subchannel "
+ "attributes for subchannel "
+ "0.%x.%04x (rc=%d)\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ /*
+ * The console subchannel already has an associated ccw_device.
+ * Throw the delayed uevent for the subchannel, register
+ * the ccw_device and exit.
+ */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ cdev = sch_get_cdev(sch);
+ rc = ccw_device_add(cdev);
+ if (rc) {
+ /* Release online reference. */
+ put_device(&cdev->dev);
+ goto out_schedule;
+ }
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+ return 0;
+ }
+ io_subchannel_init_fields(sch);
+ rc = cio_commit_config(sch);
+ if (rc)
+ goto out_schedule;
+ rc = sysfs_create_group(&sch->dev.kobj,
+ &io_subchannel_attr_group);
+ if (rc)
+ goto out_schedule;
+ /* Allocate I/O subchannel private data. */
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv)
+ goto out_schedule;
+
+ set_io_private(sch, io_priv);
+ css_schedule_eval(sch->schid);
+ return 0;
+
+out_schedule:
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ spin_unlock_irq(sch->lock);
+ return 0;
+}
+
+static int
+io_subchannel_remove (struct subchannel *sch)
+{
+ struct io_subchannel_private *io_priv = to_io_private(sch);
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (!cdev)
+ goto out_free;
+ io_subchannel_quiesce(sch);
+ /* Set ccw device to not operational and drop reference. */
+ spin_lock_irq(cdev->ccwlock);
+ sch_set_cdev(sch, NULL);
+ set_io_private(sch, NULL);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ spin_unlock_irq(cdev->ccwlock);
+ ccw_device_unregister(cdev);
+out_free:
+ kfree(io_priv);
+ sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+ return 0;
+}
+
+static void io_subchannel_verify(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (cdev)
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+}
+
+static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (!cdev)
+ return;
+ if (cio_update_schib(sch))
+ goto err;
+ /* Check for I/O on path. */
+ if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+ goto out;
+ if (cdev->private->state == DEV_STATE_ONLINE) {
+ ccw_device_kill_io(cdev);
+ goto out;
+ }
+ if (cio_clear(sch))
+ goto err;
+out:
+ /* Trigger path verification. */
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return;
+
+err:
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+}
+
+static int io_subchannel_chp_event(struct subchannel *sch,
+ struct chp_link *link, int event)
+{
+ struct ccw_device *cdev = sch_get_cdev(sch);
+ int mask;
+
+ mask = chp_ssd_get_mask(&sch->ssd_info, link);
+ if (!mask)
+ return 0;
+ switch (event) {
+ case CHP_VARY_OFF:
+ sch->opm &= ~mask;
+ sch->lpm &= ~mask;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
+ io_subchannel_terminate_path(sch, mask);
+ break;
+ case CHP_VARY_ON:
+ sch->opm |= mask;
+ sch->lpm |= mask;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
+ io_subchannel_verify(sch);
+ break;
+ case CHP_OFFLINE:
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
+ io_subchannel_terminate_path(sch, mask);
+ break;
+ case CHP_ONLINE:
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ sch->lpm |= mask & sch->opm;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
+ io_subchannel_verify(sch);
+ break;
+ }
+ return 0;
+}
+
+static void io_subchannel_quiesce(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ cdev = sch_get_cdev(sch);
+ if (cio_is_console(sch->schid))
+ goto out_unlock;
+ if (!sch->schib.pmcw.ena)
+ goto out_unlock;
+ ret = cio_disable_subchannel(sch);
+ if (ret != -EBUSY)
+ goto out_unlock;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+ while (ret == -EBUSY) {
+ cdev->private->state = DEV_STATE_QUIESCE;
+ cdev->private->iretry = 255;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q,
+ cdev->private->state != DEV_STATE_QUIESCE);
+ spin_lock_irq(sch->lock);
+ }
+ ret = cio_disable_subchannel(sch);
+ }
+out_unlock:
+ spin_unlock_irq(sch->lock);
+}
+
+static void io_subchannel_shutdown(struct subchannel *sch)
+{
+ io_subchannel_quiesce(sch);
+}
+
+static int device_is_disconnected(struct ccw_device *cdev)
+{
+ if (!cdev)
+ return 0;
+ return (cdev->private->state == DEV_STATE_DISCONNECTED ||
+ cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
+}
+
+static int recovery_check(struct device *dev, void *data)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ int *redo = data;
+
+ spin_lock_irq(cdev->ccwlock);
+ switch (cdev->private->state) {
+ case DEV_STATE_DISCONNECTED:
+ CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ *redo = 1;
+ break;
+ case DEV_STATE_DISCONNECTED_SENSE_ID:
+ *redo = 1;
+ break;
+ }
+ spin_unlock_irq(cdev->ccwlock);
+
+ return 0;
+}
+
+static void recovery_work_func(struct work_struct *unused)
+{
+ int redo = 0;
+
+ bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
+ if (redo) {
+ spin_lock_irq(&recovery_lock);
+ if (!timer_pending(&recovery_timer)) {
+ if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
+ recovery_phase++;
+ mod_timer(&recovery_timer, jiffies +
+ recovery_delay[recovery_phase] * HZ);
+ }
+ spin_unlock_irq(&recovery_lock);
+ } else
+ CIO_MSG_EVENT(4, "recovery: end\n");
+}
+
+static DECLARE_WORK(recovery_work, recovery_work_func);
+
+static void recovery_func(unsigned long data)
+{
+ /*
+ * We can't do our recovery in softirq context and it's not
+ * performance critical, so we schedule it.
+ */
+ schedule_work(&recovery_work);
+}
+
+static void ccw_device_schedule_recovery(void)
+{
+ unsigned long flags;
+
+ CIO_MSG_EVENT(4, "recovery: schedule\n");
+ spin_lock_irqsave(&recovery_lock, flags);
+ if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
+ recovery_phase = 0;
+ mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
+ }
+ spin_unlock_irqrestore(&recovery_lock, flags);
+}
+
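+/*
+ * Together with recovery_delay[] above, this yields an escalating retry
+ * scheme: the first recovery pass runs 3 seconds after scheduling, the
+ * second 30 seconds later, and every further pass 300 seconds after the
+ * previous one, until no device needs another attempt.
+ */
+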
+static int purge_fn(struct device *dev, void *data)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (is_blacklisted(id->ssid, id->devno) &&
+ (cdev->private->state == DEV_STATE_OFFLINE) &&
+ (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
+ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+ id->devno);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ atomic_set(&cdev->private->onoff, 0);
+ }
+ spin_unlock_irq(cdev->ccwlock);
+ /* Abort loop in case of pending signal. */
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 0;
+}
+
+/**
+ * ccw_purge_blacklisted - purge unused, blacklisted devices
+ *
+ * Unregister all ccw devices that are offline and on the blacklist.
+ */
+int ccw_purge_blacklisted(void)
+{
+ CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
+ bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
+ return 0;
+}
+
+void ccw_device_set_disconnected(struct ccw_device *cdev)
+{
+ if (!cdev)
+ return;
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->flags.fake_irb = 0;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ if (cdev->online)
+ ccw_device_schedule_recovery();
+}
+
+void ccw_device_set_notoper(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ CIO_TRACE_EVENT(2, "notoper");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+ ccw_device_set_timeout(cdev, 0);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+}
+
+enum io_sch_action {
+ IO_SCH_UNREG,
+ IO_SCH_ORPH_UNREG,
+ IO_SCH_ATTACH,
+ IO_SCH_UNREG_ATTACH,
+ IO_SCH_ORPH_ATTACH,
+ IO_SCH_REPROBE,
+ IO_SCH_VERIFY,
+ IO_SCH_DISC,
+ IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (cio_update_schib(sch)) {
+ /* Not operational. */
+ if (!cdev)
+ return IO_SCH_UNREG;
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ return IO_SCH_UNREG;
+ return IO_SCH_ORPH_UNREG;
+ }
+ /* Operational. */
+ if (!cdev)
+ return IO_SCH_ATTACH;
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_ORPH_ATTACH;
+ }
+ if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
+ return IO_SCH_UNREG;
+ return IO_SCH_DISC;
+ }
+ if (device_is_disconnected(cdev))
+ return IO_SCH_REPROBE;
+ if (cdev->online && !cdev->private->flags.resuming)
+ return IO_SCH_VERIFY;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ unsigned long flags;
+ struct ccw_device *cdev;
+ struct ccw_dev_id dev_id;
+ enum io_sch_action action;
+ int rc = -EAGAIN;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+ cdev = sch_get_cdev(sch);
+ if (cdev && work_pending(&cdev->private->todo_work))
+ goto out_unlock;
+ action = sch_get_action(sch);
+ CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, process,
+ action);
+ /* Perform immediate actions while holding the lock. */
+ switch (action) {
+ case IO_SCH_REPROBE:
+ /* Trigger device recognition. */
+ ccw_device_trigger_reprobe(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_VERIFY:
+ /* Trigger path verification. */
+ io_subchannel_verify(sch);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_DISC:
+ ccw_device_set_disconnected(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ ccw_device_set_disconnected(cdev);
+ break;
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_UNREG:
+ if (!cdev)
+ break;
+ if (cdev->private->state == DEV_STATE_SENSE_ID) {
+ /*
+ * Note: delayed work triggered by this event
+ * and repeated calls to sch_event are synchronized
+ * by the above check for work_pending(cdev).
+ */
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ } else
+ ccw_device_set_notoper(cdev);
+ break;
+ case IO_SCH_NOP:
+ rc = 0;
+ goto out_unlock;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* All other actions require process context. */
+ if (!process)
+ goto out;
+ /* Handle attached ccw device. */
+ switch (action) {
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ /* Move ccw device to orphanage. */
+ rc = ccw_device_move_to_orph(cdev);
+ if (rc)
+ goto out;
+ break;
+ case IO_SCH_UNREG_ATTACH:
+ spin_lock_irqsave(sch->lock, flags);
+ if (cdev->private->flags.resuming) {
+ /* Device will be handled later. */
+ rc = 0;
+ goto out_unlock;
+ }
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Unregister ccw device. */
+ ccw_device_unregister(cdev);
+ break;
+ default:
+ break;
+ }
+ /* Handle subchannel. */
+ switch (action) {
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_UNREG:
+ if (!cdev || !cdev->private->flags.resuming)
+ css_sch_device_unregister(sch);
+ break;
+ case IO_SCH_ORPH_ATTACH:
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_ATTACH:
+ dev_id.ssid = sch->schid.ssid;
+ dev_id.devno = sch->schib.pmcw.dev;
+ cdev = get_ccwdev_by_dev_id(&dev_id);
+ if (!cdev) {
+ sch_create_and_recog_new_device(sch);
+ break;
+ }
+ rc = ccw_device_move_to_sch(cdev, sch);
+ if (rc) {
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
+ goto out;
+ }
+ spin_lock_irqsave(sch->lock, flags);
+ ccw_device_trigger_reprobe(cdev);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+out:
+ return rc;
+}
+
+static void ccw_device_set_int_class(struct ccw_device *cdev)
+{
+ struct ccw_driver *cdrv = cdev->drv;
+
+ /* Note: we interpret class 0 in this context as an uninitialized
+ * field since it translates to a non-I/O interrupt class. */
+ if (cdrv->int_class != 0)
+ cdev->private->int_class = cdrv->int_class;
+ else
+ cdev->private->int_class = IRQIO_CIO;
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+int __init ccw_device_enable_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int rc;
+
+ if (!cdev->drv || !cdev->handler)
+ return -EINVAL;
+
+ io_subchannel_init_fields(sch);
+ rc = cio_commit_config(sch);
+ if (rc)
+ return rc;
+ sch->driver = &io_subchannel_driver;
+ io_subchannel_recog(cdev, sch);
+ /* Now wait for the async. recognition to come to an end. */
+ spin_lock_irq(cdev->ccwlock);
+ while (!dev_fsm_final_state(cdev))
+ ccw_device_wait_idle(cdev);
+
+ /* Hold on to an extra reference while device is online. */
+ get_device(&cdev->dev);
+ rc = ccw_device_online(cdev);
+ if (rc)
+ goto out_unlock;
+
+ while (!dev_fsm_final_state(cdev))
+ ccw_device_wait_idle(cdev);
+
+ if (cdev->private->state == DEV_STATE_ONLINE)
+ cdev->online = 1;
+ else
+ rc = -EIO;
+out_unlock:
+ spin_unlock_irq(cdev->ccwlock);
+ if (rc) /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return rc;
+}
+
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
+{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+
+ sch = cio_probe_console();
+ if (IS_ERR(sch))
+ return ERR_CAST(sch);
+
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv) {
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
+ }
+ set_io_private(sch, io_priv);
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ put_device(&sch->dev);
+ kfree(io_priv);
+ return cdev;
+ }
+ cdev->drv = drv;
+ ccw_device_set_int_class(cdev);
+ return cdev;
+}
+
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct io_subchannel_private *io_priv = to_io_private(sch);
+
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with device lock being held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ while (1) {
+ cio_tsch(sch);
+ if (sch->schib.scsw.cmd.actl == 0)
+ break;
+ udelay_simple(100);
+ }
+}
+
+static int ccw_device_pm_restore(struct device *dev);
+
+int ccw_device_force_console(struct ccw_device *cdev)
+{
+ return ccw_device_pm_restore(&cdev->dev);
+}
+EXPORT_SYMBOL_GPL(ccw_device_force_console);
+#endif
+
+/*
+ * Get the ccw_device matching the bus id, but only if owned by cdrv.
+ */
+static int
+__ccwdev_check_busid(struct device *dev, void *id)
+{
+ char *bus_id;
+
+ bus_id = id;
+
+ return (strcmp(bus_id, dev_name(dev)) == 0);
+}
+
+
+/**
+ * get_ccwdev_by_busid() - obtain device from a bus id
+ * @cdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @cdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ * If a match is found, the reference count of the found device is increased
+ * and the device is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
+ const char *bus_id)
+{
+ struct device *dev;
+
+ dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
+ __ccwdev_check_busid);
+
+ return dev ? to_ccwdev(dev) : NULL;
+}
+
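+/*
+ * As with get_ccwdev_by_dev_id(), a successful lookup returns a
+ * reference that the caller must drop ("foo_driver" and the bus id are
+ * made up):
+ *
+ *	cdev = get_ccwdev_by_busid(&foo_driver, "0.0.1234");
+ *	if (cdev) {
+ *		...
+ *		put_device(&cdev->dev);
+ *	}
+ */
+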
+/************************** device driver handling ************************/
+
+/* This is the implementation of the ccw_driver class. The probe, remove
+ * and release methods are initially very similar to the device_driver
+ * implementations, with the difference that they have ccw_device
+ * arguments.
+ *
+ * A ccw driver also contains the information that is needed for
+ * device matching.
+ */
+static int
+ccw_device_probe (struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
+ int ret;
+
+ cdev->drv = cdrv; /* to let the driver call _set_online */
+ ccw_device_set_int_class(cdev);
+ ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
+ if (ret) {
+ cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ccw_device_remove(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = cdev->drv;
+ int ret;
+
+ if (cdrv->remove)
+ cdrv->remove(cdev);
+
+ spin_lock_irq(cdev->ccwlock);
+ if (cdev->online) {
+ cdev->online = 0;
+ ret = ccw_device_offline(cdev);
+ spin_unlock_irq(cdev->ccwlock);
+ if (ret == 0)
+ wait_event(cdev->private->wait_q,
+ dev_fsm_final_state(cdev));
+ else
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ /* Give up reference obtained in ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ spin_lock_irq(cdev->ccwlock);
+ }
+ ccw_device_set_timeout(cdev, 0);
+ cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
+ spin_unlock_irq(cdev->ccwlock);
+ return 0;
+}
+
+static void ccw_device_shutdown(struct device *dev)
+{
+ struct ccw_device *cdev;
+
+ cdev = to_ccwdev(dev);
+ if (cdev->drv && cdev->drv->shutdown)
+ cdev->drv->shutdown(cdev);
+ disable_cmf(cdev);
+}
+
+static int ccw_device_pm_prepare(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (work_pending(&cdev->private->todo_work))
+ return -EAGAIN;
+ /* Fail while device is being set online/offline. */
+ if (atomic_read(&cdev->private->onoff))
+ return -EAGAIN;
+
+ if (cdev->online && cdev->drv && cdev->drv->prepare)
+ return cdev->drv->prepare(cdev);
+
+ return 0;
+}
+
+static void ccw_device_pm_complete(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (cdev->online && cdev->drv && cdev->drv->complete)
+ cdev->drv->complete(cdev);
+}
+
+static int ccw_device_pm_freeze(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+	/* Fail suspend while the device is in a transitional state. */
+ if (!dev_fsm_final_state(cdev))
+ return -EAGAIN;
+ if (!cdev->online)
+ return 0;
+ if (cdev->drv && cdev->drv->freeze) {
+ ret = cdev->drv->freeze(cdev);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irq(sch->lock);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (cm_enabled) {
+		/* Keep the css from writing measurement data to memory. */
+ ret = ccw_set_cmf(cdev, 0);
+ if (ret)
+ return ret;
+ }
+ /* From here on, disallow device driver I/O. */
+ spin_lock_irq(sch->lock);
+ ret = cio_disable_subchannel(sch);
+ spin_unlock_irq(sch->lock);
+
+ return ret;
+}
+
+static int ccw_device_pm_thaw(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+ if (!cdev->online)
+ return 0;
+
+ spin_lock_irq(sch->lock);
+ /* Allow device driver I/O again. */
+ ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (ret)
+ return ret;
+
+ if (cm_enabled) {
+ ret = ccw_set_cmf(cdev, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (cdev->drv && cdev->drv->thaw)
+ ret = cdev->drv->thaw(cdev);
+
+ return ret;
+}
+
+static void __ccw_device_pm_restore(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid)) {
+ cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ goto out_unlock;
+ }
+ /*
+ * While we were sleeping, devices may have gone or become
+ * available again. Kick re-detection.
+ */
+ cdev->private->flags.resuming = 1;
+ cdev->private->path_new_mask = LPM_ANYPATH;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ spin_unlock_irq(sch->lock);
+ css_wait_for_slow_path();
+
+ /* cdev may have been moved to a different subchannel. */
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_OFFLINE)
+ goto out_unlock;
+
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED);
+ spin_lock_irq(sch->lock);
+
+out_unlock:
+ cdev->private->flags.resuming = 0;
+ spin_unlock_irq(sch->lock);
+}
+
+static int resume_handle_boxed(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_BOXED;
+ if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
+ return 0;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ return -ENODEV;
+}
+
+static int resume_handle_disc(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
+ return 0;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ return -ENODEV;
+}
+
+static int ccw_device_pm_restore(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
+ int ret = 0;
+
+ __ccw_device_pm_restore(cdev);
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid))
+ goto out_restore;
+
+ /* check recognition results */
+ switch (cdev->private->state) {
+ case DEV_STATE_OFFLINE:
+ case DEV_STATE_ONLINE:
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_BOXED:
+ ret = resume_handle_boxed(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ default:
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
+ /* check if the device type has changed */
+ if (!ccw_device_test_sense_data(cdev)) {
+ ccw_device_update_sense_data(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ if (!cdev->online)
+ goto out_unlock;
+
+ if (ccw_device_online(cdev)) {
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(sch->lock);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* reenable cmf, if needed */
+ if (cdev->private->cmb) {
+ spin_unlock_irq(sch->lock);
+ ret = ccw_set_cmf(cdev, 1);
+ spin_lock_irq(sch->lock);
+ if (ret) {
+ CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
+ "(rc=%d)\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ ret = 0;
+ }
+ }
+
+out_restore:
+ spin_unlock_irq(sch->lock);
+ if (cdev->online && cdev->drv && cdev->drv->restore)
+ ret = cdev->drv->restore(cdev);
+ return ret;
+
+out_unlock:
+ spin_unlock_irq(sch->lock);
+ return ret;
+}
+
+static const struct dev_pm_ops ccw_pm_ops = {
+ .prepare = ccw_device_pm_prepare,
+ .complete = ccw_device_pm_complete,
+ .freeze = ccw_device_pm_freeze,
+ .thaw = ccw_device_pm_thaw,
+ .restore = ccw_device_pm_restore,
+};
+
+static struct bus_type ccw_bus_type = {
+ .name = "ccw",
+ .match = ccw_bus_match,
+ .uevent = ccw_uevent,
+ .probe = ccw_device_probe,
+ .remove = ccw_device_remove,
+ .shutdown = ccw_device_shutdown,
+ .pm = &ccw_pm_ops,
+};
+
+/**
+ * ccw_driver_register() - register a ccw driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccw_driver_register(struct ccw_driver *cdriver)
+{
+ struct device_driver *drv = &cdriver->driver;
+
+ drv->bus = &ccw_bus_type;
+
+ return driver_register(drv);
+}
+
+/**
+ * ccw_driver_unregister() - deregister a ccw driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccw_driver_unregister(struct ccw_driver *cdriver)
+{
+ driver_unregister(&cdriver->driver);
+}
+
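+/*
+ * Usage sketch (illustrative only, not part of this patch): a minimal
+ * driver hooking into the ccw bus. All "foo_*" names are hypothetical;
+ * the device id is an arbitrary example.
+ *
+ *	static struct ccw_device_id foo_ids[] = {
+ *		{ CCW_DEVICE(0x3088, 0x60) },
+ *		{ },
+ *	};
+ *	MODULE_DEVICE_TABLE(ccw, foo_ids);
+ *
+ *	static struct ccw_driver foo_driver = {
+ *		.driver = {
+ *			.name	= "foo",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *		.ids	= foo_ids,
+ *		.probe	= foo_probe,
+ *		.remove	= foo_remove,
+ *	};
+ *
+ *	static int __init foo_init(void)
+ *	{
+ *		return ccw_driver_register(&foo_driver);
+ *	}
+ *
+ *	static void __exit foo_exit(void)
+ *	{
+ *		ccw_driver_unregister(&foo_driver);
+ *	}
+ */
+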
+static void ccw_device_todo(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ enum cdev_todo todo;
+
+ priv = container_of(work, struct ccw_device_private, todo_work);
+ cdev = priv->cdev;
+ sch = to_subchannel(cdev->dev.parent);
+ /* Find out todo. */
+ spin_lock_irq(cdev->ccwlock);
+ todo = priv->todo;
+ priv->todo = CDEV_TODO_NOTHING;
+ CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+ priv->dev_id.ssid, priv->dev_id.devno, todo);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Perform todo. */
+ switch (todo) {
+ case CDEV_TODO_ENABLE_CMF:
+ cmf_reenable(cdev);
+ break;
+ case CDEV_TODO_REBIND:
+ ccw_device_do_unbind_bind(cdev);
+ break;
+ case CDEV_TODO_REGISTER:
+ io_subchannel_register(cdev);
+ break;
+ case CDEV_TODO_UNREG_EVAL:
+ if (!sch_is_pseudo_sch(sch))
+ css_schedule_eval(sch->schid);
+ /* fall-through */
+ case CDEV_TODO_UNREG:
+ if (sch_is_pseudo_sch(sch))
+ ccw_device_unregister(cdev);
+ else
+ ccw_device_call_sch_unregister(cdev);
+ break;
+ default:
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo value identifying the operation to be scheduled
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+ CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ todo);
+ if (cdev->private->todo >= todo)
+ return;
+ cdev->private->todo = todo;
+ /* Get workqueue ref. */
+ if (!get_device(&cdev->dev))
+ return;
+ if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&cdev->dev);
+ }
+}
+
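+/*
+ * Priority sketch (illustrative; assumes the enum cdev_todo ordering in
+ * io_sch.h, where CDEV_TODO_UNREG ranks highest): a higher-priority todo
+ * replaces a pending lower one, while an equal or lower request is
+ * silently dropped.
+ *
+ *	spin_lock_irq(cdev->ccwlock);
+ *	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+ *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);	// supersedes REBIND
+ *	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);	// dropped
+ *	spin_unlock_irq(cdev->ccwlock);
+ */
+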
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ return chsc_siosl(sch->schid);
+}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
+
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(ccw_device_set_online);
+EXPORT_SYMBOL(ccw_device_set_offline);
+EXPORT_SYMBOL(ccw_driver_register);
+EXPORT_SYMBOL(ccw_driver_unregister);
+EXPORT_SYMBOL(get_ccwdev_by_busid);
diff --git a/kernel/drivers/s390/cio/device.h b/kernel/drivers/s390/cio/device.h
new file mode 100644
index 000000000..8d1d29873
--- /dev/null
+++ b/kernel/drivers/s390/cio/device.h
@@ -0,0 +1,150 @@
+#ifndef S390_DEVICE_H
+#define S390_DEVICE_H
+
+#include <asm/ccwdev.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
+#include "io_sch.h"
+
+/*
+ * states of the device state machine
+ */
+enum dev_state {
+ DEV_STATE_NOT_OPER,
+ DEV_STATE_SENSE_PGID,
+ DEV_STATE_SENSE_ID,
+ DEV_STATE_OFFLINE,
+ DEV_STATE_VERIFY,
+ DEV_STATE_ONLINE,
+ DEV_STATE_W4SENSE,
+ DEV_STATE_DISBAND_PGID,
+ DEV_STATE_BOXED,
+ /* states to wait for i/o completion before doing something */
+ DEV_STATE_TIMEOUT_KILL,
+ DEV_STATE_QUIESCE,
+ /* special states for devices gone not operational */
+ DEV_STATE_DISCONNECTED,
+ DEV_STATE_DISCONNECTED_SENSE_ID,
+ DEV_STATE_CMFCHANGE,
+ DEV_STATE_CMFUPDATE,
+ DEV_STATE_STEAL_LOCK,
+ /* last element! */
+ NR_DEV_STATES
+};
+
+/*
+ * asynchronous events of the device state machine
+ */
+enum dev_event {
+ DEV_EVENT_NOTOPER,
+ DEV_EVENT_INTERRUPT,
+ DEV_EVENT_TIMEOUT,
+ DEV_EVENT_VERIFY,
+ /* last element! */
+ NR_DEV_EVENTS
+};
+
+struct ccw_device;
+
+/*
+ * action called through jumptable
+ */
+typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
+extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
+
+static inline void
+dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int state = cdev->private->state;
+
+ if (dev_event == DEV_EVENT_INTERRUPT) {
+ if (state == DEV_STATE_ONLINE)
+ inc_irq_stat(cdev->private->int_class);
+ else if (state != DEV_STATE_CMFCHANGE &&
+ state != DEV_STATE_CMFUPDATE)
+ inc_irq_stat(IRQIO_CIO);
+ }
+ dev_jumptable[state][dev_event](cdev, dev_event);
+}
+
+/*
+ * Returns 1 if the device state is final.
+ */
+static inline int
+dev_fsm_final_state(struct ccw_device *cdev)
+{
+ return (cdev->private->state == DEV_STATE_NOT_OPER ||
+ cdev->private->state == DEV_STATE_OFFLINE ||
+ cdev->private->state == DEV_STATE_ONLINE ||
+ cdev->private->state == DEV_STATE_BOXED);
+}
+
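+/*
+ * Typical use (a sketch; mirrors ccw_device_pm_restore() in device.c):
+ * block until the FSM has settled before inspecting the result.
+ *
+ *	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ */
+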
+int __init io_subchannel_init(void);
+
+void io_subchannel_recog_done(struct ccw_device *cdev);
+void io_subchannel_init_config(struct subchannel *sch);
+
+int ccw_device_cancel_halt_clear(struct ccw_device *);
+
+int ccw_device_is_orphan(struct ccw_device *);
+
+void ccw_device_recognition(struct ccw_device *);
+int ccw_device_online(struct ccw_device *);
+int ccw_device_offline(struct ccw_device *);
+void ccw_device_update_sense_data(struct ccw_device *);
+int ccw_device_test_sense_data(struct ccw_device *);
+void ccw_device_schedule_sch_unregister(struct ccw_device *);
+int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
+
+/* Function prototypes for device status and basic sense stuff. */
+void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
+void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
+int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
+int ccw_device_do_sense(struct ccw_device *, struct irb *);
+
+/* Function prototype for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
+/* Function prototypes for sense id stuff. */
+void ccw_device_sense_id_start(struct ccw_device *);
+void ccw_device_sense_id_done(struct ccw_device *, int);
+
+/* Function prototypes for path grouping stuff. */
+void ccw_device_verify_start(struct ccw_device *);
+void ccw_device_verify_done(struct ccw_device *, int);
+
+void ccw_device_disband_start(struct ccw_device *);
+void ccw_device_disband_done(struct ccw_device *, int);
+
+void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
+void ccw_device_stlck_done(struct ccw_device *, void *, int);
+
+int ccw_device_call_handler(struct ccw_device *);
+
+int ccw_device_stlck(struct ccw_device *);
+
+/* Helper function for machine check handling. */
+void ccw_device_trigger_reprobe(struct ccw_device *);
+void ccw_device_kill_io(struct ccw_device *);
+int ccw_device_notify(struct ccw_device *, int);
+void ccw_device_set_disconnected(struct ccw_device *cdev);
+void ccw_device_set_notoper(struct ccw_device *cdev);
+
+void ccw_device_set_timeout(struct ccw_device *, int);
+
+/* Channel measurement facility related */
+void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
+int ccw_set_cmf(struct ccw_device *cdev, int enable);
+extern struct device_attribute dev_attr_cmb_enable;
+#endif
diff --git a/kernel/drivers/s390/cio/device_fsm.c b/kernel/drivers/s390/cio/device_fsm.c
new file mode 100644
index 000000000..83da53c8e
--- /dev/null
+++ b/kernel/drivers/s390/cio/device_fsm.c
@@ -0,0 +1,1117 @@
+/*
+ * finite state machine for device handling
+ *
+ * Copyright IBM Corp. 2002, 2008
+ * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/chpid.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "chp.h"
+
+static int timeout_log_enabled;
+
+static int __init ccw_timeout_log_setup(char *unused)
+{
+ timeout_log_enabled = 1;
+ return 1;
+}
+
+__setup("ccw_timeout_log", ccw_timeout_log_setup);
+
+static void ccw_timeout_log(struct ccw_device *cdev)
+{
+ struct schib schib;
+ struct subchannel *sch;
+ struct io_subchannel_private *private;
+ union orb *orb;
+ int cc;
+
+ sch = to_subchannel(cdev->dev.parent);
+ private = to_io_private(sch);
+ orb = &private->orb;
+ cc = stsch_err(sch->schid, &schib);
+
+ printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
+ "device information:\n", get_tod_clock());
+ printk(KERN_WARNING "cio: orb:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ orb, sizeof(*orb), 0);
+ printk(KERN_WARNING "cio: ccw device bus id: %s\n",
+ dev_name(&cdev->dev));
+ printk(KERN_WARNING "cio: subchannel bus id: %s\n",
+ dev_name(&sch->dev));
+ printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
+ "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
+
+ if (orb->tm.b) {
+ printk(KERN_WARNING "cio: orb indicates transport mode\n");
+ printk(KERN_WARNING "cio: last tcw:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ (void *)(addr_t)orb->tm.tcw,
+ sizeof(struct tcw), 0);
+ } else {
+ printk(KERN_WARNING "cio: orb indicates command mode\n");
+ if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
+ (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+ printk(KERN_WARNING "cio: last channel program "
+ "(intern):\n");
+ else
+ printk(KERN_WARNING "cio: last channel program:\n");
+
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ (void *)(addr_t)orb->cmd.cpa,
+ sizeof(struct ccw1), 0);
+ }
+ printk(KERN_WARNING "cio: ccw device state: %d\n",
+ cdev->private->state);
+ printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
+ printk(KERN_WARNING "cio: schib:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ &schib, sizeof(schib), 0);
+ printk(KERN_WARNING "cio: ccw device flags:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ &cdev->private->flags, sizeof(cdev->private->flags), 0);
+}
+
+/*
+ * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
+ */
+static void
+ccw_device_timeout(unsigned long data)
+{
+ struct ccw_device *cdev;
+
+ cdev = (struct ccw_device *) data;
+ spin_lock_irq(cdev->ccwlock);
+ if (timeout_log_enabled)
+ ccw_timeout_log(cdev);
+ dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
+ spin_unlock_irq(cdev->ccwlock);
+}
+
+/*
+ * Set timeout
+ */
+void
+ccw_device_set_timeout(struct ccw_device *cdev, int expires)
+{
+ if (expires == 0) {
+ del_timer(&cdev->private->timer);
+ return;
+ }
+ if (timer_pending(&cdev->private->timer)) {
+ if (mod_timer(&cdev->private->timer, jiffies + expires))
+ return;
+ }
+ cdev->private->timer.function = ccw_device_timeout;
+ cdev->private->timer.data = (unsigned long) cdev;
+ cdev->private->timer.expires = jiffies + expires;
+ add_timer(&cdev->private->timer);
+}
+
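+/*
+ * Usage sketch (mirrors the recovery handlers below): arm a timeout
+ * before starting an asynchronous operation and cancel it once done.
+ * An expired timer raises DEV_EVENT_TIMEOUT on the FSM.
+ *
+ *	ccw_device_set_timeout(cdev, 3 * HZ);	// arm three-second timer
+ *	...start asynchronous operation...
+ *	ccw_device_set_timeout(cdev, 0);	// cancel pending timer
+ */
+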
+/*
+ * Cancel running i/o. This is called repeatedly since halt/clear are
+ * asynchronous operations. We do one try with cio_cancel, three tries
+ * with cio_halt and 255 tries with cio_clear. If everything fails, we
+ * give up and report the failure.
+ * Returns 0 if the device is now idle, -ENODEV if the device is not
+ * operational and -EBUSY if an interrupt is expected (either from
+ * halt/clear or from a status pending).
+ */
+int
+ccw_device_cancel_halt_clear(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ if (!sch->schib.pmcw.ena)
+ /* Not operational -> done. */
+ return 0;
+ /* Stage 1: cancel io. */
+ if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
+ !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+ if (!scsw_is_tm(&sch->schib.scsw)) {
+ ret = cio_cancel(sch);
+ if (ret != -EINVAL)
+ return ret;
+ }
+ /* cancel io unsuccessful or not applicable (transport mode).
+ * Continue with asynchronous instructions. */
+ cdev->private->iretry = 3; /* 3 halt retries. */
+ }
+ if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+ /* Stage 2: halt io. */
+ if (cdev->private->iretry) {
+ cdev->private->iretry--;
+ ret = cio_halt(sch);
+ if (ret != -EBUSY)
+ return (ret == 0) ? -EBUSY : ret;
+ }
+ /* halt io unsuccessful. */
+ cdev->private->iretry = 255; /* 255 clear retries. */
+ }
+ /* Stage 3: clear io. */
+ if (cdev->private->iretry) {
+ cdev->private->iretry--;
+ ret = cio_clear (sch);
+ return (ret == 0) ? -EBUSY : ret;
+ }
+ /* Function was unsuccessful */
+ CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
+ return -EIO;
+}
+
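+/*
+ * Call pattern (a sketch; compare ccw_device_kill_io() below): initialize
+ * the retry counter once, then re-invoke from the timeout handler for as
+ * long as -EBUSY indicates that an interrupt is still outstanding.
+ *
+ *	cdev->private->iretry = 255;
+ *	ret = ccw_device_cancel_halt_clear(cdev);
+ *	if (ret == -EBUSY)
+ *		ccw_device_set_timeout(cdev, 3 * HZ);	// retry on timeout
+ */
+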
+void ccw_device_update_sense_data(struct ccw_device *cdev)
+{
+ memset(&cdev->id, 0, sizeof(cdev->id));
+ cdev->id.cu_type = cdev->private->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->senseid.dev_model;
+}
+
+int ccw_device_test_sense_data(struct ccw_device *cdev)
+{
+ return cdev->id.cu_type == cdev->private->senseid.cu_type &&
+ cdev->id.cu_model == cdev->private->senseid.cu_model &&
+ cdev->id.dev_type == cdev->private->senseid.dev_type &&
+ cdev->id.dev_model == cdev->private->senseid.dev_model;
+}
+
+/*
+ * The machine won't give us any notification by machine check if a chpid has
+ * been varied online on the SE, so we have to find out by magic (i.e. by
+ * driving the channel subsystem to device selection and updating our path
+ * masks).
+ */
+static void
+__recover_lost_chpids(struct subchannel *sch, int old_lpm)
+{
+ int mask, i;
+ struct chp_id chpid;
+
+ chp_id_init(&chpid);
+ for (i = 0; i<8; i++) {
+ mask = 0x80 >> i;
+ if (!(sch->lpm & mask))
+ continue;
+ if (old_lpm & mask)
+ continue;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ if (!chp_is_registered(chpid))
+ css_schedule_eval_all();
+ }
+}
+
+/*
+ * Stop device recognition.
+ */
+static void
+ccw_device_recog_done(struct ccw_device *cdev, int state)
+{
+ struct subchannel *sch;
+ int old_lpm;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ if (cio_disable_subchannel(sch))
+ state = DEV_STATE_NOT_OPER;
+ /*
+ * Now that we tried recognition, we have performed device selection
+ * through ssch() and the path information is up to date.
+ */
+ old_lpm = sch->lpm;
+
+ /* Check since device may again have become not operational. */
+ if (cio_update_schib(sch))
+ state = DEV_STATE_NOT_OPER;
+ else
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
+ /* Force reprobe on all chpids. */
+ old_lpm = 0;
+ if (sch->lpm != old_lpm)
+ __recover_lost_chpids(sch, old_lpm);
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
+ (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
+ cdev->private->flags.recog_done = 1;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ if (cdev->private->flags.resuming) {
+ cdev->private->state = state;
+ cdev->private->flags.recog_done = 1;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ switch (state) {
+ case DEV_STATE_NOT_OPER:
+ break;
+ case DEV_STATE_OFFLINE:
+ if (!cdev->online) {
+ ccw_device_update_sense_data(cdev);
+ break;
+ }
+ cdev->private->state = DEV_STATE_OFFLINE;
+ cdev->private->flags.recog_done = 1;
+ if (ccw_device_test_sense_data(cdev)) {
+ cdev->private->flags.donotify = 1;
+ ccw_device_online(cdev);
+ wake_up(&cdev->private->wait_q);
+ } else {
+ ccw_device_update_sense_data(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+ }
+ return;
+ case DEV_STATE_BOXED:
+ if (cdev->id.cu_type != 0) { /* device was recognized before */
+ cdev->private->flags.recog_done = 1;
+ cdev->private->state = DEV_STATE_BOXED;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ break;
+ }
+ cdev->private->state = state;
+ io_subchannel_recog_done(cdev);
+ wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Function called from device_id.c after sense id has completed.
+ */
+void
+ccw_device_sense_id_done(struct ccw_device *cdev, int err)
+{
+ switch (err) {
+ case 0:
+ ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
+ break;
+ case -ETIME: /* Sense id stopped by timeout. */
+ ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+ break;
+ default:
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+}
+
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occurred
+ * @event: event that occurred
+ *
+ * Returns:
+ * -%EINVAL if the device is offline or has no driver.
+ * -%EOPNOTSUPP if the device's driver has no notifier registered.
+ * %NOTIFY_OK if the driver wants to keep the device.
+ * %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
+int ccw_device_notify(struct ccw_device *cdev, int event)
+{
+ int ret = -EINVAL;
+
+ if (!cdev->drv)
+ goto out;
+ if (!cdev->online)
+ goto out;
+ CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ event);
+ if (!cdev->drv->notify) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (cdev->drv->notify(cdev, event))
+ ret = NOTIFY_OK;
+ else
+ ret = NOTIFY_BAD;
+out:
+ return ret;
+}
+
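+/*
+ * Driver-side sketch (hypothetical foo_notify; not part of this patch):
+ * a nonzero return keeps the device (NOTIFY_OK), zero gives it up
+ * (NOTIFY_BAD).
+ *
+ *	static int foo_notify(struct ccw_device *cdev, int event)
+ *	{
+ *		switch (event) {
+ *		case CIO_GONE:
+ *		case CIO_NO_PATH:
+ *			return 1;	// keep device, wait for reconnect
+ *		default:
+ *			return 0;	// let cio unregister the device
+ *		}
+ *	}
+ */
+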
+static void ccw_device_oper_notify(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
+ /* Reenable channel measurements, if needed. */
+ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
+ /* Save indication for new paths. */
+ cdev->private->path_new_mask = sch->vpm;
+ return;
+ }
+ /* Driver doesn't want device back. */
+ ccw_device_set_notoper(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+}
+
+/*
+ * Finished with online/offline processing.
+ */
+static void
+ccw_device_done(struct ccw_device *cdev, int state)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ ccw_device_set_timeout(cdev, 0);
+
+ if (state != DEV_STATE_ONLINE)
+ cio_disable_subchannel(sch);
+
+ /* Reset device status. */
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+ cdev->private->state = state;
+
+ switch (state) {
+ case DEV_STATE_BOXED:
+ CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (cdev->online &&
+ ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_NOT_OPER:
+ CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ else
+ ccw_device_set_disconnected(cdev);
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_DISCONNECTED:
+ CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
+ "%04x\n", cdev->private->dev_id.devno,
+ sch->schid.sch_no);
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ } else
+ ccw_device_set_disconnected(cdev);
+ cdev->private->flags.donotify = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (cdev->private->flags.donotify) {
+ cdev->private->flags.donotify = 0;
+ ccw_device_oper_notify(cdev);
+ }
+ wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Start device recognition.
+ */
+void ccw_device_recognition(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ /*
+ * We used to start here with a sense pgid to find out whether a device
+ * is locked by someone else. Unfortunately, the sense pgid command
+ * code has other meanings on devices predating the path grouping
+	 * algorithm, so we start with sense id and box the device after a
+	 * timeout (or if sense pgid during path verification detects that the
+	 * device is locked, as may happen on newer devices).
+ */
+ cdev->private->flags.recog_done = 0;
+ cdev->private->state = DEV_STATE_SENSE_ID;
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ return;
+ }
+ ccw_device_sense_id_start(cdev);
+}
+
+/*
+ * Handle events for states that use the ccw request infrastructure.
+ */
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
+{
+ switch (e) {
+ case DEV_EVENT_NOTOPER:
+ ccw_request_notoper(cdev);
+ break;
+ case DEV_EVENT_INTERRUPT:
+ ccw_request_handler(cdev);
+ break;
+ case DEV_EVENT_TIMEOUT:
+ ccw_request_timeout(cdev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ccw_device_report_path_events(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int path_event[8];
+ int chp, mask;
+
+ for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
+ path_event[chp] = PE_NONE;
+ if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
+ path_event[chp] |= PE_PATH_GONE;
+ if (mask & cdev->private->path_new_mask & sch->vpm)
+ path_event[chp] |= PE_PATH_AVAILABLE;
+ if (mask & cdev->private->pgid_reset_mask & sch->vpm)
+ path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
+ }
+ if (cdev->online && cdev->drv->path_event)
+ cdev->drv->path_event(cdev, path_event);
+}
+
+static void ccw_device_reset_path_events(struct ccw_device *cdev)
+{
+ cdev->private->path_gone_mask = 0;
+ cdev->private->path_new_mask = 0;
+ cdev->private->pgid_reset_mask = 0;
+}
+
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /* Update schib - pom may have changed. */
+ if (cio_update_schib(sch)) {
+ err = -ENODEV;
+ goto callback;
+ }
+ /* Update lpm with verified path mask. */
+ sch->lpm = sch->vpm;
+ /* Repeat path verification? */
+ if (cdev->private->flags.doverify) {
+ ccw_device_verify_start(cdev);
+ return;
+ }
+callback:
+ switch (err) {
+ case 0:
+ ccw_device_done(cdev, DEV_STATE_ONLINE);
+ /* Deliver fake irb to device driver, if needed. */
+ if (cdev->private->flags.fake_irb) {
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
+ cdev->private->flags.fake_irb = 0;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ &cdev->private->irb);
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ }
+ ccw_device_report_path_events(cdev);
+ break;
+ case -ETIME:
+ case -EUSERS:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ break;
+ case -EACCES:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+ break;
+ default:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+ ccw_device_reset_path_events(cdev);
+}
+
+/*
+ * Get device online.
+ */
+int
+ccw_device_online(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if ((cdev->private->state != DEV_STATE_OFFLINE) &&
+ (cdev->private->state != DEV_STATE_BOXED))
+ return -EINVAL;
+ sch = to_subchannel(cdev->dev.parent);
+ ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ if (ret != 0) {
+ /* Couldn't enable the subchannel for i/o. Sick device. */
+ if (ret == -ENODEV)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ return ret;
+ }
+ /* Start initial path verification. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
+ return 0;
+}
+
+void
+ccw_device_disband_done(struct ccw_device *cdev, int err)
+{
+ switch (err) {
+ case 0:
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ break;
+ case -ETIME:
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ break;
+ default:
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+}
+
+/*
+ * Shutdown device.
+ */
+int
+ccw_device_offline(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ /* Allow ccw_device_offline while disconnected. */
+ if (cdev->private->state == DEV_STATE_DISCONNECTED ||
+ cdev->private->state == DEV_STATE_NOT_OPER) {
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ return 0;
+ }
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ return 0;
+ }
+ if (ccw_device_is_orphan(cdev)) {
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ return 0;
+ }
+ sch = to_subchannel(cdev->dev.parent);
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ if (scsw_actl(&sch->schib.scsw) != 0)
+ return -EBUSY;
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EINVAL;
+ /* Are we doing path grouping? */
+ if (!cdev->private->flags.pgroup) {
+ /* No, set state offline immediately. */
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ return 0;
+ }
+ /* Start Set Path Group commands. */
+ cdev->private->state = DEV_STATE_DISBAND_PGID;
+ ccw_device_disband_start(cdev);
+ return 0;
+}
+
+/*
+ * Handle not operational event in non-special state.
+ */
+static void ccw_device_generic_notoper(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ else
+ ccw_device_set_disconnected(cdev);
+}
+
+/*
+ * Handle path verification event in offline state.
+ */
+static void ccw_device_offline_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ css_schedule_eval(sch->schid);
+}
+
+/*
+ * Handle path verification event.
+ */
+static void
+ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct subchannel *sch;
+
+ if (cdev->private->state == DEV_STATE_W4SENSE) {
+ cdev->private->flags.doverify = 1;
+ return;
+ }
+ sch = to_subchannel(cdev->dev.parent);
+ /*
+ * Since we might not just be coming from an interrupt from the
+	 * subchannel, we have to update the schib.
+ */
+ if (cio_update_schib(sch)) {
+ ccw_device_verify_done(cdev, -ENODEV);
+ return;
+ }
+
+ if (scsw_actl(&sch->schib.scsw) != 0 ||
+ (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
+ (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+ /*
+ * No final status yet or final status not yet delivered
+ * to the device driver. Can't do path verification now,
+ * delay until final status was delivered.
+ */
+ cdev->private->flags.doverify = 1;
+ return;
+ }
+ /* Device is idle, we can do the path verification. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
+}
+
+/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (cdev->online) {
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ else
+ ccw_device_online_verify(cdev, dev_event);
+ } else
+ css_schedule_eval(sch->schid);
+}
+
+/*
+ * Got an interrupt for a normal io (state online).
+ */
+static void
+ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct irb *irb;
+ int is_cmd;
+
+ irb = this_cpu_ptr(&cio_irb);
+ is_cmd = !scsw_is_tm(&irb->scsw);
+ /* Check for unsolicited interrupt. */
+ if (!scsw_is_solicited(&irb->scsw)) {
+ if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ !irb->esw.esw0.erw.cons) {
+ /* Unit check but no sense data. Need basic sense. */
+ if (ccw_device_do_sense(cdev, irb) != 0)
+ goto call_handler_unsol;
+ memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ cdev->private->state = DEV_STATE_W4SENSE;
+ cdev->private->intparm = 0;
+ return;
+ }
+call_handler_unsol:
+ if (cdev->handler)
+ cdev->handler (cdev, 0, irb);
+ if (cdev->private->flags.doverify)
+ ccw_device_online_verify(cdev, 0);
+ return;
+ }
+ /* Accumulate status and find out if a basic sense is needed. */
+ ccw_device_accumulate_irb(cdev, irb);
+ if (is_cmd && cdev->private->flags.dosense) {
+ if (ccw_device_do_sense(cdev, irb) == 0) {
+ cdev->private->state = DEV_STATE_W4SENSE;
+ }
+ return;
+ }
+ /* Call the handler. */
+ if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Got a timeout in online state.
+ */
+static void
+ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int ret;
+
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->iretry = 255;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, 3*HZ);
+ cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+ return;
+ }
+ if (ret)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ else if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-ETIMEDOUT));
+}
+
+/*
+ * Got an interrupt for a basic sense.
+ */
+static void
+ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct irb *irb;
+
+ irb = this_cpu_ptr(&cio_irb);
+ /* Check for unsolicited interrupt. */
+ if (scsw_stctl(&irb->scsw) ==
+ (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
+ if (scsw_cc(&irb->scsw) == 1)
+ /* Basic sense hasn't started. Try again. */
+ ccw_device_do_sense(cdev, irb);
+ else {
+ CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
+ "interrupt during w4sense...\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (cdev->handler)
+ cdev->handler (cdev, 0, irb);
+ }
+ return;
+ }
+ /*
+	 * Check if a halt or clear has been issued in the meantime. If so,
+ * only deliver the halt/clear interrupt to the device driver as if it
+ * had killed the original request.
+ */
+ if (scsw_fctl(&irb->scsw) &
+ (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+ cdev->private->flags.dosense = 0;
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+ ccw_device_accumulate_irb(cdev, irb);
+ goto call_handler;
+ }
+ /* Add basic sense info to irb. */
+ ccw_device_accumulate_basic_sense(cdev, irb);
+ if (cdev->private->flags.dosense) {
+ /* Another basic sense is needed. */
+ ccw_device_do_sense(cdev, irb);
+ return;
+ }
+call_handler:
+ cdev->private->state = DEV_STATE_ONLINE;
+ /* In case sensing interfered with setting the device online */
+ wake_up(&cdev->private->wait_q);
+ /* Call the handler. */
+ if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+}
+
+static void
+ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ ccw_device_set_timeout(cdev, 0);
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+ /* OK, i/o is dead now. Call interrupt handler. */
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-EIO));
+}
+
+static void
+ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int ret;
+
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, 3*HZ);
+ return;
+ }
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-EIO));
+}
+
+void ccw_device_kill_io(struct ccw_device *cdev)
+{
+ int ret;
+
+ cdev->private->iretry = 255;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, 3*HZ);
+ cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+ return;
+ }
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-EIO));
+}
+
+static void
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ /* Start verification after current task finished. */
+ cdev->private->flags.doverify = 1;
+}
+
+static void
+ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
+ /* Couldn't enable the subchannel for i/o. Sick device. */
+ return;
+ cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
+ ccw_device_sense_id_start(cdev);
+}
+
+void ccw_device_trigger_reprobe(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ if (cdev->private->state != DEV_STATE_DISCONNECTED)
+ return;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /* Update some values. */
+ if (cio_update_schib(sch))
+ return;
+ /*
+ * The pim, pam, pom values may not be accurate, but they are the best
+ * we have before performing device selection :/
+ */
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
+ /*
+	 * Use the initial configuration since we can't be sure that the old
+ * paths are valid.
+ */
+ io_subchannel_init_config(sch);
+ if (cio_commit_config(sch))
+ return;
+
+	/* We should also update ssd info, but this has to wait. */
+ /* Check if this is another device which appeared on the same sch. */
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+ css_schedule_eval(sch->schid);
+ else
+ ccw_device_start_id(cdev, 0);
+}
+
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /*
+ * An interrupt in a disabled state means a previous disable was not
+ * successful - should not happen, but we try to disable again.
+ */
+ cio_disable_subchannel(sch);
+}
+
+static void
+ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ retry_set_schib(cdev);
+ cdev->private->state = DEV_STATE_ONLINE;
+ dev_fsm_event(cdev, dev_event);
+}
+
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ cmf_retry_copy_block(cdev);
+ cdev->private->state = DEV_STATE_ONLINE;
+ dev_fsm_event(cdev, dev_event);
+}
+
+static void
+ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ wake_up(&cdev->private->wait_q);
+}
+
+static void
+ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int ret;
+
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ } else {
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ wake_up(&cdev->private->wait_q);
+ }
+}
+
+/*
+ * No operation action. This is used e.g. to ignore a timeout event in
+ * state offline.
+ */
+static void
+ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
+{
+}
+
+/*
+ * device state machine
+ */
+fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
+ [DEV_STATE_NOT_OPER] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_nop,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_SENSE_PGID] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_SENSE_ID] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_OFFLINE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
+ },
+ [DEV_STATE_VERIFY] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
+ },
+ [DEV_STATE_ONLINE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
+ [DEV_EVENT_VERIFY] = ccw_device_online_verify,
+ },
+ [DEV_STATE_W4SENSE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_online_verify,
+ },
+ [DEV_STATE_DISBAND_PGID] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_BOXED] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_nop,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
+ },
+ /* states to wait for i/o completion before doing something */
+ [DEV_STATE_TIMEOUT_KILL] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
+ [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
+ },
+ [DEV_STATE_QUIESCE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
+ [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
+ [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ /* special states for devices gone not operational */
+ [DEV_STATE_DISCONNECTED] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_nop,
+ [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_start_id,
+ },
+ [DEV_STATE_DISCONNECTED_SENSE_ID] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_CMFCHANGE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
+ [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
+ [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
+ [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
+ },
+ [DEV_STATE_CMFUPDATE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
+ [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
+ [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
+ [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
+ },
+ [DEV_STATE_STEAL_LOCK] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+};
+
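+/*
+ * Dispatch sketch (illustrative): events enter the table above through
+ * dev_fsm_event() (see device.h), e.g. when the interrupt handler posts
+ * an interrupt event under the ccw device lock:
+ *
+ *	spin_lock(cdev->ccwlock);
+ *	dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+ *	spin_unlock(cdev->ccwlock);
+ */
+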
+EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/kernel/drivers/s390/cio/device_id.c b/kernel/drivers/s390/cio/device_id.c
new file mode 100644
index 000000000..d4fa30541
--- /dev/null
+++ b/kernel/drivers/s390/cio/device_id.c
@@ -0,0 +1,222 @@
+/*
+ * CCW device SENSE ID I/O handling.
+ *
+ * Copyright IBM Corp. 2002, 2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/ccwdev.h>
+#include <asm/setup.h>
+#include <asm/cio.h>
+#include <asm/diag.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define SENSE_ID_RETRIES 256
+#define SENSE_ID_TIMEOUT (10 * HZ)
+#define SENSE_ID_MIN_LEN 4
+#define SENSE_ID_BASIC_LEN 7
+
+/**
+ * diag210_to_senseid - convert diag 0x210 data to sense id information
+ * @senseid: sense id
+ * @diag: diag 0x210 data
+ *
+ * Return 0 on success, non-zero otherwise.
+ */
+static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
+{
+ static struct {
+ int class, type, cu_type;
+ } vm_devices[] = {
+ { 0x08, 0x01, 0x3480 },
+ { 0x08, 0x02, 0x3430 },
+ { 0x08, 0x10, 0x3420 },
+ { 0x08, 0x42, 0x3424 },
+ { 0x08, 0x44, 0x9348 },
+ { 0x08, 0x81, 0x3490 },
+ { 0x08, 0x82, 0x3422 },
+ { 0x10, 0x41, 0x1403 },
+ { 0x10, 0x42, 0x3211 },
+ { 0x10, 0x43, 0x3203 },
+ { 0x10, 0x45, 0x3800 },
+ { 0x10, 0x47, 0x3262 },
+ { 0x10, 0x48, 0x3820 },
+ { 0x10, 0x49, 0x3800 },
+ { 0x10, 0x4a, 0x4245 },
+ { 0x10, 0x4b, 0x4248 },
+ { 0x10, 0x4d, 0x3800 },
+ { 0x10, 0x4e, 0x3820 },
+ { 0x10, 0x4f, 0x3820 },
+ { 0x10, 0x82, 0x2540 },
+ { 0x10, 0x84, 0x3525 },
+ { 0x20, 0x81, 0x2501 },
+ { 0x20, 0x82, 0x2540 },
+ { 0x20, 0x84, 0x3505 },
+ { 0x40, 0x01, 0x3278 },
+ { 0x40, 0x04, 0x3277 },
+ { 0x40, 0x80, 0x2250 },
+ { 0x40, 0xc0, 0x5080 },
+ { 0x80, 0x00, 0x3215 },
+ };
+ int i;
+
+ /* Special case for osa devices. */
+ if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
+ senseid->cu_type = 0x3088;
+ senseid->cu_model = 0x60;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
+ if (diag->vrdcvcla == vm_devices[i].class &&
+ diag->vrdcvtyp == vm_devices[i].type) {
+ senseid->cu_type = vm_devices[i].cu_type;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * diag210_get_dev_info - retrieve device information via diag 0x210
+ * @cdev: ccw device
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int diag210_get_dev_info(struct ccw_device *cdev)
+{
+ struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
+ struct diag210 diag_data;
+ int rc;
+
+ if (dev_id->ssid != 0)
+ return -ENODEV;
+ memset(&diag_data, 0, sizeof(diag_data));
+ diag_data.vrdcdvno = dev_id->devno;
+ diag_data.vrdclen = sizeof(diag_data);
+ rc = diag210(&diag_data);
+ CIO_TRACE_EVENT(4, "diag210");
+ CIO_HEX_EVENT(4, &rc, sizeof(rc));
+ CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
+ if (rc != 0 && rc != 2)
+ goto err_failed;
+ if (diag210_to_senseid(senseid, &diag_data))
+ goto err_unknown;
+ return 0;
+
+err_unknown:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
+ dev_id->ssid, dev_id->devno);
+ return -ENODEV;
+err_failed:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
+ dev_id->ssid, dev_id->devno, rc);
+ return -ENODEV;
+}
+
+/*
+ * Initialize SENSE ID data.
+ */
+static void snsid_init(struct ccw_device *cdev)
+{
+ cdev->private->flags.esid = 0;
+ memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+ cdev->private->senseid.cu_type = 0xffff;
+}
+
+/*
+ * Check for complete SENSE ID data.
+ */
+static int snsid_check(struct ccw_device *cdev, void *data)
+{
+ struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ int len = sizeof(struct senseid) - scsw->count;
+
+ /* Check for incomplete SENSE ID data. */
+ if (len < SENSE_ID_MIN_LEN)
+ goto out_restart;
+ if (cdev->private->senseid.cu_type == 0xffff)
+ goto out_restart;
+ /* Check for incompatible SENSE ID data. */
+ if (cdev->private->senseid.reserved != 0xff)
+ return -EOPNOTSUPP;
+ /* Check for extended-identification information. */
+ if (len > SENSE_ID_BASIC_LEN)
+ cdev->private->flags.esid = 1;
+ return 0;
+
+out_restart:
+ snsid_init(cdev);
+ return -EAGAIN;
+}
+
+/*
+ * Process SENSE ID request result.
+ */
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->senseid;
+ int vm = 0;
+
+ if (rc && MACHINE_IS_VM) {
+ /* Try diag 0x210 fallback on z/VM. */
+ snsid_init(cdev);
+ if (diag210_get_dev_info(cdev) == 0) {
+ rc = 0;
+ vm = 1;
+ }
+ }
+ CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+ "%04x/%02x%s\n", id->ssid, id->devno, rc,
+ senseid->cu_type, senseid->cu_model, senseid->dev_type,
+ senseid->dev_model, vm ? " (diag210)" : "");
+ ccw_device_sense_id_done(cdev, rc);
+}
+
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ CIO_TRACE_EVENT(4, "snsid");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Data setup. */
+ snsid_init(cdev);
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_ID;
+ cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->count = sizeof(struct senseid);
+ cp->flags = CCW_FLAG_SLI;
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->cp = cp;
+ req->timeout = SENSE_ID_TIMEOUT;
+ req->maxretries = SENSE_ID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->check = snsid_check;
+ req->callback = snsid_callback;
+ ccw_request_start(cdev);
+}
diff --git a/kernel/drivers/s390/cio/device_ops.c b/kernel/drivers/s390/cio/device_ops.c
new file mode 100644
index 000000000..f3c417943
--- /dev/null
+++ b/kernel/drivers/s390/cio/device_ops.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+
+#include <asm/ccwdev.h>
+#include <asm/idals.h>
+#include <asm/chpid.h>
+#include <asm/fcx.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc.h"
+#include "device.h"
+#include "chp.h"
+
+/**
+ * ccw_device_set_options_mask() - set some options and unset the rest
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, all flags not specified in @flags
+ * are cleared.
+ * Returns:
+ * %0 on success, -%EINVAL on an invalid flag combination.
+ */
+int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
+{
+ /*
+	 * The flags are mutually exclusive ...
+ */
+ if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+ (flags & CCWDEV_REPORT_ALL))
+ return -EINVAL;
+ cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+ cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
+ cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
+ cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
+ return 0;
+}
+
+/**
+ * ccw_device_set_options() - set some options
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, the remainder is left untouched.
+ * Returns:
+ * %0 on success, -%EINVAL if an invalid flag combination would ensue.
+ */
+int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
+{
+ /*
+	 * The flags are mutually exclusive ...
+ */
+ if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
+ (flags & CCWDEV_REPORT_ALL)) ||
+ ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+ cdev->private->options.repall) ||
+ ((flags & CCWDEV_REPORT_ALL) &&
+ cdev->private->options.fast))
+ return -EINVAL;
+ cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+ cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
+ cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
+ cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
+ return 0;
+}
+
+/**
+ * ccw_device_clear_options() - clear some options
+ * @cdev: device for which the options are to be cleared
+ * @flags: options to be cleared
+ *
+ * All flags specified in @flags are cleared, the remainder is left untouched.
+ */
+void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
+{
+ cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
+ cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
+ cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
+ cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+ cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
+}
+
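+/*
+ * Usage sketch: enable path grouping and multipath before setting the
+ * device online, then drop multipath again later.
+ *
+ *	if (ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+ *					 CCWDEV_DO_MULTIPATH))
+ *		return -EINVAL;
+ *	...
+ *	ccw_device_clear_options(cdev, CCWDEV_DO_MULTIPATH);
+ */
+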
+/**
+ * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+ return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+ return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
+
+/**
+ * ccw_device_clear() - terminate I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ * outstanding, otherwise the intparm associated with the I/O request
+ * is returned
+ *
+ * ccw_device_clear() calls csch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_W4SENSE)
+ return -EINVAL;
+
+ ret = cio_clear(sch);
+ if (ret == 0)
+ cdev->private->intparm = intparm;
+ return ret;
+}
+
+/**
+ * ccw_device_start_key() - start a s390 channel program with key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
+ if (cdev->private->state != DEV_STATE_ONLINE ||
+ ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
+ cdev->private->flags.doverify)
+ return -EBUSY;
+ ret = cio_set_options (sch, flags);
+ if (ret)
+ return ret;
+ /* Adjust requested path mask to exclude unusable paths. */
+ if (lpm) {
+ lpm &= sch->lpm;
+ if (lpm == 0)
+ return -EACCES;
+ }
+ ret = cio_start_key (sch, cpa, lpm, key);
+ switch (ret) {
+ case 0:
+ cdev->private->intparm = intparm;
+ break;
+ case -EACCES:
+ case -ENODEV:
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ break;
+ }
+ return ret;
+}
+
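+/*
+ * Usage sketch (hypothetical buf/my_req; not part of this patch): issue
+ * a single-CCW channel program under the ccw device lock. The CCW setup
+ * mirrors ccw_device_sense_id_start() in device_id.c.
+ *
+ *	struct ccw1 cp = {
+ *		.cmd_code = CCW_CMD_SENSE_ID,
+ *		.flags	  = CCW_FLAG_SLI,
+ *		.count	  = sizeof(*buf),
+ *		.cda	  = (u32)(addr_t)buf,
+ *	};
+ *	unsigned long flags;
+ *	int rc;
+ *
+ *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ *	rc = ccw_device_start_key(cdev, &cp, (unsigned long)my_req,
+ *				  0, PAGE_DEFAULT_KEY, 0);	// lpm 0: use opm
+ *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ */
+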
+/**
+ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags, int expires)
+{
+ int ret;
+
+ if (!cdev)
+ return -ENODEV;
+ ccw_device_set_timeout(cdev, expires);
+ ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
+ if (ret != 0)
+ ccw_device_set_timeout(cdev, 0);
+ return ret;
+}
+
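+/*
+ * Sketch of how a driver's interrupt handler might detect the timeout
+ * reported by the _timeout variants; my_irq_handler and handle_timeout
+ * are placeholder names (illustrative only):
+ *
+ *	static void my_irq_handler(struct ccw_device *cdev,
+ *				   unsigned long intparm, struct irb *irb)
+ *	{
+ *		if (IS_ERR(irb)) {
+ *			if (PTR_ERR(irb) == -ETIMEDOUT)
+ *				handle_timeout(cdev);
+ *			return;
+ *		}
+ *		... evaluate irb->scsw ...
+ *	}
+ */
+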
+/**
+ * ccw_device_start() - start a s390 channel program
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, unsigned long flags)
+{
+ return ccw_device_start_key(cdev, cpa, intparm, lpm,
+ PAGE_DEFAULT_KEY, flags);
+}
+
+/**
+ * ccw_device_start_timeout() - start a s390 channel program with timeout
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm,
+ unsigned long flags, int expires)
+{
+ return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
+ PAGE_DEFAULT_KEY, flags,
+ expires);
+}
+
+
+/**
+ * ccw_device_halt() - halt I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ *	     outstanding; otherwise the intparm associated with the I/O request
+ * is returned
+ *
+ * ccw_device_halt() calls hsch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_W4SENSE)
+ return -EINVAL;
+
+ ret = cio_halt(sch);
+ if (ret == 0)
+ cdev->private->intparm = intparm;
+ return ret;
+}
+
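+/*
+ * Illustrative sketch: terminating an outstanding request. As with the
+ * start functions, the ccw device lock must be held:
+ *
+ *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ *	rc = ccw_device_halt(cdev, intparm);
+ *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ *
+ * On success, completion of the halt function is signaled through the
+ * device's interrupt handler.
+ */
+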
+/**
+ * ccw_device_resume() - resume channel program execution
+ * @cdev: target ccw device
+ *
+ * ccw_device_resume() calls rsch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_resume(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE ||
+ !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+ return -EINVAL;
+ return cio_resume(sch);
+}
+
+/*
+ * Pass interrupt to device driver.
+ */
+int
+ccw_device_call_handler(struct ccw_device *cdev)
+{
+ unsigned int stctl;
+ int ending_status;
+
+	/*
+	 * We call the device action handler if:
+	 *  - we received ending status
+	 *  - the action handler requested to see all interrupts
+	 *  - we received an intermediate status
+	 *  - fast notification was requested (primary status)
+	 *  - the interrupt was unsolicited
+	 */
+ stctl = scsw_stctl(&cdev->private->irb.scsw);
+ ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
+ (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
+ (stctl == SCSW_STCTL_STATUS_PEND);
+ if (!ending_status &&
+ !cdev->private->options.repall &&
+ !(stctl & SCSW_STCTL_INTER_STATUS) &&
+ !(cdev->private->options.fast &&
+ (stctl & SCSW_STCTL_PRIM_STATUS)))
+ return 0;
+
+ /* Clear pending timers for device driver initiated I/O. */
+ if (ending_status)
+ ccw_device_set_timeout(cdev, 0);
+ /*
+ * Now we are ready to call the device driver interrupt handler.
+ */
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ &cdev->private->irb);
+
+ /*
+ * Clear the old and now useless interrupt response block.
+ */
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+ return 1;
+}
+
+/**
+ * ccw_device_get_ciw() - Search for CIW command in extended sense data.
+ * @cdev: ccw device to inspect
+ * @ct: command type to look for
+ *
+ * During SenseID, command information words (CIWs) describing special
+ * commands available to the device may have been stored in the extended
+ * sense data. This function searches for CIWs of a specified command
+ * type in the extended sense data.
+ * Returns:
+ * %NULL if no extended sense data has been stored or if no CIW of the
+ * specified command type could be found,
+ * else a pointer to the CIW of the specified command type.
+ */
+struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
+{
+ int ciw_cnt;
+
+ if (cdev->private->flags.esid == 0)
+ return NULL;
+ for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
+ if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
+ return cdev->private->senseid.ciw + ciw_cnt;
+ return NULL;
+}
+
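+/*
+ * Example (illustrative): looking up the device's read-configuration-
+ * data command, as done by drivers that evaluate RCD information:
+ *
+ *	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
+ *
+ *	if (ciw)
+ *		... ciw->cmd is the command code, ciw->count the length ...
+ */
+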
+/**
+ * ccw_device_get_path_mask() - get currently available paths
+ * @cdev: ccw device to be queried
+ * Returns:
+ * %0 if no subchannel for the device is available,
+ * else the mask of currently available paths for the ccw device's subchannel.
+ */
+__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ if (!cdev->dev.parent)
+ return 0;
+
+ sch = to_subchannel(cdev->dev.parent);
+ return sch->lpm;
+}
+
+struct stlck_data {
+ struct completion done;
+ int rc;
+};
+
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+ struct stlck_data *sdata = data;
+
+ sdata->rc = rc;
+ complete(&sdata->done);
+}
+
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct stlck_data data;
+ u8 *buffer;
+ int rc;
+
+ /* Check if steal lock operation is valid for this device. */
+ if (cdev->drv) {
+ if (!cdev->private->options.force)
+ return -EINVAL;
+ }
+ buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ init_completion(&data.done);
+ data.rc = -EIO;
+ spin_lock_irq(sch->lock);
+ rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+ if (rc)
+ goto out_unlock;
+ /* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
+ ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+ spin_unlock_irq(sch->lock);
+ /* Wait for operation to finish. */
+ if (wait_for_completion_interruptible(&data.done)) {
+ /* Got a signal. */
+ spin_lock_irq(sch->lock);
+ ccw_request_cancel(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_for_completion(&data.done);
+ }
+ rc = data.rc;
+ /* Check results. */
+ spin_lock_irq(sch->lock);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_BOXED;
+out_unlock:
+ spin_unlock_irq(sch->lock);
+ kfree(buffer);
+
+ return rc;
+}
+
+/**
+ * ccw_device_get_chp_desc - return newly allocated channel-path descriptor
+ * @cdev: device to obtain the descriptor for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel path. Return %NULL on error.
+ */
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
+ int chp_idx)
+{
+ struct subchannel *sch;
+ struct chp_id chpid;
+
+ sch = to_subchannel(cdev->dev.parent);
+ chp_id_init(&chpid);
+ chpid.id = sch->schib.pmcw.chpid[chp_idx];
+ return chp_get_chp_desc(chpid);
+}
+
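+/*
+ * Illustrative use: the returned descriptor is a copy allocated on
+ * behalf of the caller and must be freed after use:
+ *
+ *	struct channel_path_desc *desc;
+ *
+ *	desc = ccw_device_get_chp_desc(cdev, 0);
+ *	if (desc) {
+ *		... evaluate desc->chpid etc. ...
+ *		kfree(desc);
+ *	}
+ */
+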
+/**
+ * ccw_device_get_id - obtain a ccw device id
+ * @cdev: device to obtain the id for
+ * @dev_id: where to fill in the values
+ */
+void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
+{
+ *dev_id = cdev->private->dev_id;
+}
+EXPORT_SYMBOL(ccw_device_get_id);
+
+/**
+ * ccw_device_tm_start_key - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm, u8 key)
+{
+ struct subchannel *sch;
+ int rc;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+		} else {
+			/* There's already a fake I/O around. */
+			return -EBUSY;
+		}
+ }
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EIO;
+ /* Adjust requested path mask to exclude unusable paths. */
+ if (lpm) {
+ lpm &= sch->lpm;
+ if (lpm == 0)
+ return -EACCES;
+ }
+ rc = cio_tm_start_key(sch, tcw, lpm, key);
+ if (rc == 0)
+ cdev->private->intparm = intparm;
+ return rc;
+}
+EXPORT_SYMBOL(ccw_device_tm_start_key);
+
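+/*
+ * Sketch (illustrative): starting a transport-mode request. The tcw is
+ * assumed to have been assembled with the fcx helpers (see fcx.c), and
+ * the ccw device lock must be held:
+ *
+ *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ *	rc = ccw_device_tm_start_key(cdev, tcw, intparm, 0,
+ *				     PAGE_DEFAULT_KEY);
+ *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ */
+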
+/**
+ * ccw_device_tm_start_timeout_key - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm, u8 key,
+ int expires)
+{
+ int ret;
+
+ ccw_device_set_timeout(cdev, expires);
+ ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
+ if (ret != 0)
+ ccw_device_set_timeout(cdev, 0);
+ return ret;
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
+
+/**
+ * ccw_device_tm_start - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm)
+{
+ return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
+ PAGE_DEFAULT_KEY);
+}
+EXPORT_SYMBOL(ccw_device_tm_start);
+
+/**
+ * ccw_device_tm_start_timeout - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm, int expires)
+{
+ return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
+ PAGE_DEFAULT_KEY, expires);
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout);
+
+/**
+ * ccw_device_get_mdc - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-byte blocks that all paths support at least
+ * for a transport command. Return values <= 0 indicate failures.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_path *chp;
+ struct chp_id chpid;
+ int mdc = 0, i;
+
+	/* Adjust requested path mask to exclude varied-off paths. */
+ if (mask)
+ mask &= sch->lpm;
+ else
+ mask = sch->lpm;
+
+ chp_id_init(&chpid);
+ for (i = 0; i < 8; i++) {
+ if (!(mask & (0x80 >> i)))
+ continue;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ continue;
+
+ mutex_lock(&chp->lock);
+ if (!chp->desc_fmt1.f) {
+ mutex_unlock(&chp->lock);
+ return 0;
+ }
+ if (!chp->desc_fmt1.r)
+ mdc = 1;
+ mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+ chp->desc_fmt1.mdc;
+ mutex_unlock(&chp->lock);
+ }
+
+ return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
+
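+/*
+ * Illustrative use: converting the accumulated max data count into a
+ * byte limit for a transport command (mdc is in units of 64K bytes):
+ *
+ *	int mdc = ccw_device_get_mdc(cdev, 0);
+ *	size_t max_bytes = (mdc > 0) ? (size_t) mdc * 65536 : 0;
+ */
+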
+/**
+ * ccw_device_tm_intrg - perform interrogate function
+ * @cdev: ccw device on which to perform the interrogate function
+ *
+ * Perform an interrogate function on the given ccw device. Return zero on
+ * success, non-zero otherwise.
+ */
+int ccw_device_tm_intrg(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EIO;
+ if (!scsw_is_tm(&sch->schib.scsw) ||
+ !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
+ return -EINVAL;
+ return cio_tm_intrg(sch);
+}
+EXPORT_SYMBOL(ccw_device_tm_intrg);
+
+/**
+ * ccw_device_get_schid - obtain a subchannel id
+ * @cdev: device to obtain the id for
+ * @schid: where to fill in the values
+ */
+void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ *schid = sch->schid;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(ccw_device_set_options_mask);
+EXPORT_SYMBOL(ccw_device_set_options);
+EXPORT_SYMBOL(ccw_device_clear_options);
+EXPORT_SYMBOL(ccw_device_clear);
+EXPORT_SYMBOL(ccw_device_halt);
+EXPORT_SYMBOL(ccw_device_resume);
+EXPORT_SYMBOL(ccw_device_start_timeout);
+EXPORT_SYMBOL(ccw_device_start);
+EXPORT_SYMBOL(ccw_device_start_timeout_key);
+EXPORT_SYMBOL(ccw_device_start_key);
+EXPORT_SYMBOL(ccw_device_get_ciw);
+EXPORT_SYMBOL(ccw_device_get_path_mask);
+EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
diff --git a/kernel/drivers/s390/cio/device_pgid.c b/kernel/drivers/s390/cio/device_pgid.c
new file mode 100644
index 000000000..37ada05e8
--- /dev/null
+++ b/kernel/drivers/s390/cio/device_pgid.c
@@ -0,0 +1,669 @@
+/*
+ * CCW device PGID and path verification I/O handling.
+ *
+ * Copyright IBM Corp. 2002, 2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define PGID_RETRIES 256
+#define PGID_TIMEOUT (10 * HZ)
+
+static void verify_start(struct ccw_device *cdev);
+
+/*
+ * Process path verification data and report result.
+ */
+static void verify_done(struct ccw_device *cdev, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ int mpath = cdev->private->flags.mpath;
+ int pgroup = cdev->private->flags.pgroup;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ if (sch->config.mp != mpath) {
+ sch->config.mp = mpath;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+ "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+ sch->vpm);
+ ccw_device_verify_done(cdev, rc);
+}
+
+/*
+ * Create channel program to perform a NOOP.
+ */
+static void nop_build_cp(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp->cmd_code = CCW_CMD_NOOP;
+ cp->cda = 0;
+ cp->count = 0;
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+ ~cdev->private->path_noirq_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ nop_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+/*
+ * Adjust NOOP I/O status.
+ */
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+ struct irb *irb, enum io_status status)
+{
+ /* Only subchannel status might indicate a path error. */
+ if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+ return IO_DONE;
+ return status;
+}
+
+/*
+ * Process NOOP request result for a single path.
+ */
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm;
+ break;
+ case -ETIME:
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
+ goto err;
+ }
+ /* Continue on the next path. */
+ req->lpm >>= 1;
+ nop_do(cdev);
+ return;
+
+err:
+ verify_done(cdev, rc);
+}
+
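+/*
+ * Note on the iteration scheme used by the *_do/*_callback pairs in
+ * this file: path masks carry the first channel path in the most
+ * significant bit. Starting from req->lpm and shifting right after
+ * each completed path, lpm_adjust() selects the next usable path.
+ * Schematically (usable_mask is a placeholder; the real code drives
+ * this loop via callbacks):
+ *
+ *	req->lpm = 0x80;
+ *	while ((req->lpm = lpm_adjust(req->lpm, usable_mask))) {
+ *		... perform I/O on the path in req->lpm ...
+ *		req->lpm >>= 1;
+ *	}
+ */
+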
+/*
+ * Create channel program to perform SET PGID on a single path.
+ */
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+ struct pgid *pgid = &cdev->private->pgid[i];
+
+ pgid->inf.fc = fn;
+ cp->cmd_code = CCW_CMD_SET_PGID;
+ cp->cda = (u32) (addr_t) pgid;
+ cp->count = sizeof(*pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ if (rc) {
+ /* We don't know the path groups' state. Abort. */
+ verify_done(cdev, rc);
+ return;
+ }
+ /*
+ * Path groups have been reset. Restart path verification but
+ * leave paths in path_noirq_mask out.
+ */
+ cdev->private->flags.pgid_unknown = 0;
+ verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+ id->ssid, id->devno, cdev->private->pgid_valid_mask,
+ cdev->private->path_noirq_mask);
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam;
+ req->callback = pgid_wipeout_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
+/*
+ * Perform establish/resign SET PGID on a single path.
+ */
+static void spid_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ /* Use next available path that is not already in correct state. */
+ req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ /* Channel program setup. */
+ if (req->lpm & sch->opm)
+ fn = SPID_FUNC_ESTABLISH;
+ else
+ fn = SPID_FUNC_RESIGN;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ if (cdev->private->flags.pgid_unknown) {
+ /* At least one SPID could be partially done. */
+ pgid_wipeout_start(cdev);
+ return;
+ }
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+/*
+ * Process SET PGID request result for a single path.
+ */
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm & sch->opm;
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ case -EOPNOTSUPP:
+ if (cdev->private->flags.mpath) {
+ /* Try without multipathing. */
+ cdev->private->flags.mpath = 0;
+ goto out_restart;
+ }
+ /* Try without pathgrouping. */
+ cdev->private->flags.pgroup = 0;
+ goto out_restart;
+ default:
+ goto err;
+ }
+ req->lpm >>= 1;
+ spid_do(cdev);
+ return;
+
+out_restart:
+ verify_start(cdev);
+ return;
+err:
+ verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->singlepath = 1;
+ req->callback = spid_callback;
+ spid_do(cdev);
+}
+
+static int pgid_is_reset(struct pgid *p)
+{
+ char *c;
+
+ for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
+ if (*c != 0)
+ return 0;
+ }
+ return 1;
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+ return memcmp((char *) p1 + 1, (char *) p2 + 1,
+ sizeof(struct pgid) - 1);
+}
+
+/*
+ * Determine pathgroup state from PGID data.
+ */
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ int *mismatch, u8 *reserved, u8 *reset)
+{
+ struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *first = NULL;
+ int lpm;
+ int i;
+
+ *mismatch = 0;
+ *reserved = 0;
+ *reset = 0;
+ for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+ *reserved |= lpm;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
+ continue;
+ }
+ if (!first) {
+ first = pgid;
+ continue;
+ }
+ if (pgid_cmp(pgid, first) != 0)
+ *mismatch = 1;
+ }
+ if (!first)
+ first = &channel_subsystems[0]->global_pgid;
+ *p = first;
+}
+
+static u8 pgid_to_donepm(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int i;
+ int lpm;
+ u8 donepm = 0;
+
+ /* Set bits for paths which are already in the target state. */
+ for (i = 0; i < 8; i++) {
+ lpm = 0x80 >> i;
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ pgid = &cdev->private->pgid[i];
+ if (sch->opm & lpm) {
+ if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+ continue;
+ } else {
+ if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+ continue;
+ }
+ if (cdev->private->flags.mpath) {
+ if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+ continue;
+ } else {
+ if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+ continue;
+ }
+ donepm |= lpm;
+ }
+
+ return donepm;
+}
+
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+}
+
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+ u8 reserved = 0;
+ u8 reset = 0;
+ u8 donepm;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+ if (reserved == cdev->private->pgid_valid_mask)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+ else {
+ donepm = pgid_to_donepm(cdev);
+ sch->vpm = donepm & sch->opm;
+ cdev->private->pgid_reset_mask |= reset;
+ cdev->private->pgid_todo_mask &=
+ ~(donepm | cdev->private->path_noirq_mask);
+ pgid_fill(cdev, pgid);
+ }
+out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+ switch (rc) {
+ case 0:
+ if (cdev->private->flags.pgid_unknown) {
+ pgid_wipeout_start(cdev);
+ return;
+ }
+ /* Anything left to do? */
+ if (cdev->private->pgid_todo_mask == 0) {
+ verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
+ return;
+ }
+ /* Perform path-grouping. */
+ spid_start(cdev);
+ break;
+ case -EOPNOTSUPP:
+ /* Path-grouping not supported. */
+ cdev->private->flags.pgroup = 0;
+ cdev->private->flags.mpath = 0;
+ verify_start(cdev);
+ break;
+ default:
+ verify_done(cdev, rc);
+ }
+}
+
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+ int i = 8 - ffs(req->lpm);
+
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_PGID;
+ cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->count = sizeof(struct pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int ret;
+
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+ ~cdev->private->path_noirq_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ snid_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ if (cdev->private->pgid_valid_mask)
+ ret = 0;
+ else if (cdev->private->path_noirq_mask)
+ ret = -ETIME;
+ else
+ ret = -EACCES;
+ snid_done(cdev, ret);
+}
+
+/*
+ * Process SENSE PGID request result for single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ cdev->private->pgid_valid_mask |= req->lpm;
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
+ goto err;
+ }
+ /* Continue on the next path. */
+ req->lpm >>= 1;
+ snid_do(cdev);
+ return;
+
+err:
+ snid_done(cdev, rc);
+}
+
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw_dev_id *devid = &cdev->private->dev_id;
+
+ sch->vpm = 0;
+ sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+ memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->singlepath = 1;
+ if (cdev->private->flags.pgroup) {
+ CIO_TRACE_EVENT(4, "snid");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->callback = snid_callback;
+ snid_do(cdev);
+ } else {
+ CIO_TRACE_EVENT(4, "nop");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->filter = nop_filter;
+ req->callback = nop_callback;
+ nop_do(cdev);
+ }
+}
+
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
+{
+ CIO_TRACE_EVENT(4, "vrfy");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /*
+ * Initialize pathgroup and multipath state with target values.
+ * They may change in the course of path verification.
+ */
+ cdev->private->flags.pgroup = cdev->private->options.pgroup;
+ cdev->private->flags.mpath = cdev->private->options.mpath;
+ cdev->private->flags.doverify = 0;
+ cdev->private->path_noirq_mask = 0;
+ verify_start(cdev);
+}
+
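+/*
+ * Schematic overview of the verification flow implemented above
+ * (informational only):
+ *
+ *	verify_start
+ *	 |- pgroup set:	  snid_do -> snid_callback (per path) -> snid_done
+ *	 |			  -> spid_start -> spid_do -> spid_callback
+ *	 |			     (per path) -> verify_done
+ *	 `- pgroup unset: nop_do -> nop_callback (per path) -> verify_done
+ *
+ * pgid_wipeout_start() is entered from the pathgrouping branch when
+ * the path-group state is unknown; it disbands the groups and then
+ * restarts verification via verify_start().
+ */
+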
+/*
+ * Process disband SET PGID request result.
+ */
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ cdev->private->flags.mpath = 0;
+ if (sch->config.mp) {
+ sch->config.mp = 0;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+ rc);
+ ccw_device_disband_done(cdev, rc);
+}
+
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_TRACE_EVENT(4, "disb");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->singlepath = 1;
+ req->callback = disband_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->iccws;
+
+ cp[0].cmd_code = CCW_CMD_STLCK;
+ cp[0].cda = (u32) (addr_t) buf1;
+ cp[0].count = 32;
+ cp[0].flags = CCW_FLAG_CC;
+ cp[1].cmd_code = CCW_CMD_RELEASE;
+ cp[1].cda = (u32) (addr_t) buf2;
+ cp[1].count = 32;
+ cp[1].flags = 0;
+ req->cp = cp;
+}
+
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ ccw_device_stlck_done(cdev, data, rc);
+}
+
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+ void *buf2)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ CIO_TRACE_EVENT(4, "stlck");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->data = data;
+ req->callback = stlck_callback;
+ stlck_build_cp(cdev, buf1, buf2);
+ ccw_request_start(cdev);
+}
+
diff --git a/kernel/drivers/s390/cio/device_status.c b/kernel/drivers/s390/cio/device_status.c
new file mode 100644
index 000000000..15b56a15d
--- /dev/null
+++ b/kernel/drivers/s390/cio/device_status.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright IBM Corp. 2002
+ * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Status accumulation and basic sense functions.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+
+/*
+ * Check for any kind of channel or interface control check but don't
+ * issue the message for the console device
+ */
+static void
+ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
+{
+ char dbf_text[15];
+
+ if (!scsw_is_valid_cstat(&irb->scsw) ||
+ !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
+ SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
+ return;
+ CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
+ "received"
+ " ... device %04x on subchannel 0.%x.%04x, dev_stat "
+ ": %02X sch_stat : %02X\n",
+ cdev->private->dev_id.devno, cdev->private->schid.ssid,
+ cdev->private->schid.sch_no,
+ scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
+ sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
+ CIO_TRACE_EVENT(0, dbf_text);
+ CIO_HEX_EVENT(0, irb, sizeof(struct irb));
+}
+
+/*
+ * Some paths became not operational (pno bit in scsw is set).
+ */
+static void
+ccw_device_path_notoper(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (cio_update_schib(sch))
+ goto doverify;
+
+ CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
+ "not operational \n", __func__,
+ sch->schid.ssid, sch->schid.sch_no,
+ sch->schib.pmcw.pnom);
+
+ sch->lpm &= ~sch->schib.pmcw.pnom;
+doverify:
+ cdev->private->flags.doverify = 1;
+}
+
+/*
+ * Copy valid bits from the extended control word to device irb.
+ */
+static void
+ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
+{
+ /*
+	 * Copy the extended control bit if it is valid... yes, there
+	 * are conditions that have to be met for the extended control
+ * bit to have meaning. Sick.
+ */
+ cdev->private->irb.scsw.cmd.ectl = 0;
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
+ !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
+ cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+ /* Check if extended control word is valid. */
+ if (!cdev->private->irb.scsw.cmd.ectl)
+ return;
+ /* Copy concurrent sense / model dependent information. */
+	memcpy(&cdev->private->irb.ecw, irb->ecw, sizeof(irb->ecw));
+}
+
+/*
+ * Check if extended status word is valid.
+ */
+static int
+ccw_device_accumulate_esw_valid(struct irb *irb)
+{
+ if (!irb->scsw.cmd.eswf &&
+ (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
+ return 0;
+ if (irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
+ !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+ return 0;
+ return 1;
+}
+
+/*
+ * Copy valid bits from the extended status word to device irb.
+ */
+static void
+ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
+{
+ struct irb *cdev_irb;
+ struct sublog *cdev_sublog, *sublog;
+
+ if (!ccw_device_accumulate_esw_valid(irb))
+ return;
+
+ cdev_irb = &cdev->private->irb;
+
+ /* Copy last path used mask. */
+ cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
+
+ /* Copy subchannel logout information if esw is of format 0. */
+ if (irb->scsw.cmd.eswf) {
+ cdev_sublog = &cdev_irb->esw.esw0.sublog;
+ sublog = &irb->esw.esw0.sublog;
+ /* Copy extended status flags. */
+ cdev_sublog->esf = sublog->esf;
+ /*
+ * Copy fields that have a meaning for channel data check
+ * channel control check and interface control check.
+ */
+ if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
+ SCHN_STAT_CHN_CTRL_CHK |
+ SCHN_STAT_INTF_CTRL_CHK)) {
+ /* Copy ancillary report bit. */
+ cdev_sublog->arep = sublog->arep;
+ /* Copy field-validity-flags. */
+ cdev_sublog->fvf = sublog->fvf;
+ /* Copy storage access code. */
+ cdev_sublog->sacc = sublog->sacc;
+ /* Copy termination code. */
+ cdev_sublog->termc = sublog->termc;
+ /* Copy sequence code. */
+ cdev_sublog->seqc = sublog->seqc;
+ }
+ /* Copy device status check. */
+ cdev_sublog->devsc = sublog->devsc;
+ /* Copy secondary error. */
+ cdev_sublog->serr = sublog->serr;
+ /* Copy i/o-error alert. */
+ cdev_sublog->ioerr = sublog->ioerr;
+ /* Copy channel path timeout bit. */
+ if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
+ cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
+ /* Copy failing storage address validity flag. */
+ cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
+ if (cdev_irb->esw.esw0.erw.fsavf) {
+ /* ... and copy the failing storage address. */
+ memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
+ sizeof (irb->esw.esw0.faddr));
+ /* ... and copy the failing storage address format. */
+ cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
+ }
+ /* Copy secondary ccw address validity bit. */
+ cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
+ if (irb->esw.esw0.erw.scavf)
+ /* ... and copy the secondary ccw address. */
+ cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
+
+ }
+ /* FIXME: DCTI for format 2? */
+
+ /* Copy authorization bit. */
+ cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
+ /* Copy path verification required flag. */
+ cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
+ if (irb->esw.esw0.erw.pvrf)
+ cdev->private->flags.doverify = 1;
+ /* Copy concurrent sense bit. */
+ cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
+ if (irb->esw.esw0.erw.cons)
+ cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
+}
+
+/*
+ * Accumulate status from irb to devstat.
+ */
+void
+ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
+{
+ struct irb *cdev_irb;
+
+ /*
+ * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either...
+ */
+ if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+ return;
+
+ /* Check for channel checks and interface control checks. */
+ ccw_device_msg_control_check(cdev, irb);
+
+ /* Check for path not operational. */
+ if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+ ccw_device_path_notoper(cdev);
+ /* No irb accumulation for transport mode irbs. */
+ if (scsw_is_tm(&irb->scsw)) {
+ memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ return;
+ }
+ /*
+ * Don't accumulate unsolicited interrupts.
+ */
+ if (!scsw_is_solicited(&irb->scsw))
+ return;
+
+ cdev_irb = &cdev->private->irb;
+
+ /*
+ * If the clear function had been performed, all formerly pending
+ * status at the subchannel has been cleared and we must not pass
+ * intermediate accumulated status to the device driver.
+ */
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+ memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+ /* Copy bits which are valid only for the start function. */
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
+ /* Copy key. */
+ cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
+ /* Copy suspend control bit. */
+ cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
+ /* Accumulate deferred condition code. */
+ cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
+ /* Copy ccw format bit. */
+ cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
+ /* Copy prefetch bit. */
+ cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
+ /* Copy initial-status-interruption-control. */
+ cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
+ /* Copy address limit checking control. */
+ cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
+ /* Copy suppress suspend bit. */
+ cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
+ }
+
+ /* Take care of the extended control bit and extended control word. */
+ ccw_device_accumulate_ecw(cdev, irb);
+
+ /* Accumulate function control. */
+ cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
+ /* Copy activity control. */
+ cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
+ /* Accumulate status control. */
+ cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
+ /*
+ * Copy ccw address if it is valid. This is a bit simplified
+ * but should be close enough for all practical purposes.
+ */
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
+ ((irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
+ (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
+ (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
+ (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+ cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
+ /* Accumulate device status, but not the device busy flag. */
+ cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
+ /* dstat is not always valid. */
+ if (irb->scsw.cmd.stctl &
+ (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
+ | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
+ cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
+ /* Accumulate subchannel status. */
+ cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
+ /* Copy residual count if it is valid. */
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+ (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
+ == 0)
+ cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
+
+ /* Take care of bits in the extended status word. */
+ ccw_device_accumulate_esw(cdev, irb);
+
+ /*
+ * Check whether we must issue a SENSE CCW ourselves if there is no
+ * concurrent sense facility installed for the subchannel.
+ * No sense is required if no delayed sense is pending
+ * and we did not get a unit check without sense information.
+ *
+ * Note: We should check for ioinfo[irq]->flags.consns but VM
+ * violates the ESA/390 architecture and doesn't present an
+ * operand exception for virtual devices without concurrent
+ * sense facility available/supported when enabling the
+ * concurrent sense facility.
+ */
+ if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ !(cdev_irb->esw.esw0.erw.cons))
+ cdev->private->flags.dosense = 1;
+}
+
+/*
+ * Do a basic sense.
+ */
+int
+ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
+{
+ struct subchannel *sch;
+ struct ccw1 *sense_ccw;
+ int rc;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+	/* A sense is required; can we do it now? */
+ if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
+		/*
+		 * We received a unit check but have no final status yet;
+		 * therefore we must delay the SENSE processing. We must
+		 * not report this intermediate status to the device
+		 * interrupt handler.
+		 */
+ return -EBUSY;
+
+ /*
+ * We have ending status but no sense information. Do a basic sense.
+ */
+ sense_ccw = &to_io_private(sch)->sense_ccw;
+ sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
+ sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+ sense_ccw->count = SENSE_MAX_COUNT;
+ sense_ccw->flags = CCW_FLAG_SLI;
+
+ rc = cio_start(sch, sense_ccw, 0xff);
+ if (rc == -ENODEV || rc == -EACCES)
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return rc;
+}
+
+/*
+ * Add information from basic sense to devstat.
+ */
+void
+ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
+{
+ /*
+ * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either...
+ */
+ if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+ return;
+
+ /* Check for channel checks and interface control checks. */
+ ccw_device_msg_control_check(cdev, irb);
+
+ /* Check for path not operational. */
+ if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+ ccw_device_path_notoper(cdev);
+
+ if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
+ cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->flags.dosense = 0;
+ }
+ /* Check if path verification is required. */
+ if (ccw_device_accumulate_esw_valid(irb) &&
+ irb->esw.esw0.erw.pvrf)
+ cdev->private->flags.doverify = 1;
+}
+
+/*
+ * This function accumulates the status into the private devstat and
+ * starts a basic sense if one is needed.
+ */
+int
+ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
+{
+ ccw_device_accumulate_irb(cdev, irb);
+ if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
+ return -EBUSY;
+ /* Check for basic sense. */
+ if (cdev->private->flags.dosense &&
+ !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
+ cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->flags.dosense = 0;
+ return 0;
+ }
+ if (cdev->private->flags.dosense) {
+ ccw_device_do_sense(cdev, irb);
+ return -EBUSY;
+ }
+ return 0;
+}
+
diff --git a/kernel/drivers/s390/cio/eadm_sch.c b/kernel/drivers/s390/cio/eadm_sch.c
new file mode 100644
index 000000000..bee8c11cd
--- /dev/null
+++ b/kernel/drivers/s390/cio/eadm_sch.c
@@ -0,0 +1,418 @@
+/*
+ * Driver for s390 eadm subchannels
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/isc.h>
+#include <asm/cio.h>
+#include <asm/scsw.h>
+#include <asm/eadm.h>
+
+#include "eadm_sch.h"
+#include "ioasm.h"
+#include "cio.h"
+#include "css.h"
+#include "orb.h"
+
+MODULE_DESCRIPTION("driver for s390 eadm subchannels");
+MODULE_LICENSE("GPL");
+
+#define EADM_TIMEOUT (7 * HZ)
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(eadm_list);
+
+static debug_info_t *eadm_debug;
+
+#define EADM_LOG(imp, txt) do { \
+ debug_text_event(eadm_debug, imp, txt); \
+ } while (0)
+
+static void EADM_LOG_HEX(int level, void *data, int length)
+{
+ if (!debug_level_enabled(eadm_debug, level))
+ return;
+ while (length > 0) {
+ debug_event(eadm_debug, level, data, length);
+ length -= eadm_debug->buf_size;
+ data += eadm_debug->buf_size;
+ }
+}
+
+static void orb_init(union orb *orb)
+{
+ memset(orb, 0, sizeof(union orb));
+ orb->eadm.compat1 = 1;
+ orb->eadm.compat2 = 1;
+ orb->eadm.fmt = 1;
+ orb->eadm.x = 1;
+}
+
+static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
+{
+ union orb *orb = &get_eadm_private(sch)->orb;
+ int cc;
+
+ orb_init(orb);
+ orb->eadm.aob = (u32)__pa(aob);
+ orb->eadm.intparm = (u32)(addr_t)sch;
+ orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
+
+ EADM_LOG(6, "start");
+ EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
+
+ cc = ssch(sch->schid, orb);
+ switch (cc) {
+ case 0:
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
+ break;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ case 3: /* not operational */
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int eadm_subchannel_clear(struct subchannel *sch)
+{
+ int cc;
+
+ cc = csch(sch->schid);
+ if (cc)
+ return -ENODEV;
+
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
+ return 0;
+}
+
+static void eadm_subchannel_timeout(unsigned long data)
+{
+ struct subchannel *sch = (struct subchannel *) data;
+
+ spin_lock_irq(sch->lock);
+ EADM_LOG(1, "timeout");
+ EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
+ if (eadm_subchannel_clear(sch))
+ EADM_LOG(0, "clear failed");
+ spin_unlock_irq(sch->lock);
+}
+
+static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ if (expires == 0) {
+ del_timer(&private->timer);
+ return;
+ }
+ if (timer_pending(&private->timer)) {
+ if (mod_timer(&private->timer, jiffies + expires))
+ return;
+ }
+ private->timer.function = eadm_subchannel_timeout;
+ private->timer.data = (unsigned long) sch;
+ private->timer.expires = jiffies + expires;
+ add_timer(&private->timer);
+}
+
+static void eadm_subchannel_irq(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
+ struct irb *irb = this_cpu_ptr(&cio_irb);
+ int error = 0;
+
+ EADM_LOG(6, "irq");
+ EADM_LOG_HEX(6, irb, sizeof(*irb));
+
+ inc_irq_stat(IRQIO_ADM);
+
+ if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
+ && scsw->eswf == 1 && irb->esw.eadm.erw.r)
+ error = -EIO;
+
+ if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
+ error = -ETIMEDOUT;
+
+ eadm_subchannel_set_timeout(sch, 0);
+
+ if (private->state != EADM_BUSY) {
+ EADM_LOG(1, "irq unsol");
+ EADM_LOG_HEX(1, irb, sizeof(*irb));
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ return;
+ }
+ scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
+ private->state = EADM_IDLE;
+
+ if (private->completion)
+ complete(private->completion);
+}
+
+static struct subchannel *eadm_get_idle_sch(void)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(private, &eadm_list, head) {
+ sch = private->sch;
+ spin_lock(sch->lock);
+ if (private->state == EADM_IDLE) {
+ private->state = EADM_BUSY;
+ list_move_tail(&private->head, &eadm_list);
+ spin_unlock(sch->lock);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return sch;
+ }
+ spin_unlock(sch->lock);
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return NULL;
+}
+
+int eadm_start_aob(struct aob *aob)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+ int ret;
+
+ sch = eadm_get_idle_sch();
+ if (!sch)
+ return -EBUSY;
+
+ spin_lock_irqsave(sch->lock, flags);
+ eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
+ ret = eadm_subchannel_start(sch, aob);
+ if (!ret)
+ goto out_unlock;
+
+ /* Handle start subchannel failure. */
+ eadm_subchannel_set_timeout(sch, 0);
+ private = get_eadm_private(sch);
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eadm_start_aob);
+
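+/*
+ * Illustrative call sequence (the aob is assumed to have been set up
+ * by the caller, e.g. the scm_block driver):
+ *
+ *	ret = eadm_start_aob(aob);
+ *	if (ret == -EBUSY)
+ *		... retry later, all eadm subchannels are in use ...
+ *
+ * Completion or failure of the started request is reported
+ * asynchronously through scm_irq_handler().
+ */
+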
+static int eadm_subchannel_probe(struct subchannel *sch)
+{
+ struct eadm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&private->head);
+ init_timer(&private->timer);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, private);
+ private->state = EADM_IDLE;
+ private->sch = sch;
+ sch->isc = EADM_SCH_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret) {
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ kfree(private);
+ goto out;
+ }
+ spin_unlock_irq(sch->lock);
+
+ spin_lock_irq(&list_lock);
+ list_add(&private->head, &eadm_list);
+ spin_unlock_irq(&list_lock);
+
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+out:
+ return ret;
+}
+
+static void eadm_quiesce(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ if (private->state != EADM_BUSY)
+ goto disable;
+
+ if (eadm_subchannel_clear(sch))
+ goto disable;
+
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ wait_for_completion_io(&completion);
+
+ spin_lock_irq(sch->lock);
+ private->completion = NULL;
+
+disable:
+ eadm_subchannel_set_timeout(sch, 0);
+ do {
+ ret = cio_disable_subchannel(sch);
+ } while (ret == -EBUSY);
+
+ spin_unlock_irq(sch->lock);
+}
+
+static int eadm_subchannel_remove(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ spin_lock_irq(&list_lock);
+ list_del(&private->head);
+ spin_unlock_irq(&list_lock);
+
+ eadm_quiesce(sch);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+
+ kfree(private);
+
+ return 0;
+}
+
+static void eadm_subchannel_shutdown(struct subchannel *sch)
+{
+ eadm_quiesce(sch);
+}
+
+static int eadm_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int eadm_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+/**
+ * eadm_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ struct eadm_private *private;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+
+ if (cio_update_schib(sch)) {
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ goto out_unlock;
+ }
+ private = get_eadm_private(sch);
+ if (private->state == EADM_NOT_OPER)
+ private->state = EADM_IDLE;
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+
+static struct css_device_id eadm_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
+
+static struct css_driver eadm_subchannel_driver = {
+ .drv = {
+ .name = "eadm_subchannel",
+ .owner = THIS_MODULE,
+ },
+ .subchannel_type = eadm_subchannel_ids,
+ .irq = eadm_subchannel_irq,
+ .probe = eadm_subchannel_probe,
+ .remove = eadm_subchannel_remove,
+ .shutdown = eadm_subchannel_shutdown,
+ .sch_event = eadm_subchannel_sch_event,
+ .freeze = eadm_subchannel_freeze,
+ .thaw = eadm_subchannel_restore,
+ .restore = eadm_subchannel_restore,
+};
+
+static int __init eadm_sch_init(void)
+{
+ int ret;
+
+ if (!css_general_characteristics.eadm)
+ return -ENXIO;
+
+ eadm_debug = debug_register("eadm_log", 16, 1, 16);
+ if (!eadm_debug)
+ return -ENOMEM;
+
+ debug_register_view(eadm_debug, &debug_hex_ascii_view);
+ debug_set_level(eadm_debug, 2);
+
+ isc_register(EADM_SCH_ISC);
+ ret = css_driver_register(&eadm_subchannel_driver);
+ if (ret)
+ goto cleanup;
+
+ return ret;
+
+cleanup:
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+ return ret;
+}
+
+static void __exit eadm_sch_exit(void)
+{
+ css_driver_unregister(&eadm_subchannel_driver);
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+}
+module_init(eadm_sch_init);
+module_exit(eadm_sch_exit);
diff --git a/kernel/drivers/s390/cio/eadm_sch.h b/kernel/drivers/s390/cio/eadm_sch.h
new file mode 100644
index 000000000..9664e4653
--- /dev/null
+++ b/kernel/drivers/s390/cio/eadm_sch.h
@@ -0,0 +1,22 @@
+#ifndef EADM_SCH_H
+#define EADM_SCH_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include "orb.h"
+
+struct eadm_private {
+ union orb orb;
+ enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct completion *completion;
+ struct subchannel *sch;
+ struct timer_list timer;
+ struct list_head head;
+} __aligned(8);
+
+#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
+#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
+
+#endif
diff --git a/kernel/drivers/s390/cio/fcx.c b/kernel/drivers/s390/cio/fcx.c
new file mode 100644
index 000000000..ca5e9bb9d
--- /dev/null
+++ b/kernel/drivers/s390/cio/fcx.c
@@ -0,0 +1,350 @@
+/*
+ * Functions for assembling fcx enabled I/O control blocks.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include "cio.h"
+
+/**
+ * tcw_get_intrg - return pointer to associated interrogate tcw
+ * @tcw: pointer to the original tcw
+ *
+ * Return a pointer to the interrogate tcw associated with the specified tcw
+ * or %NULL if there is no associated interrogate tcw.
+ */
+struct tcw *tcw_get_intrg(struct tcw *tcw)
+{
+ return (struct tcw *) ((addr_t) tcw->intrg);
+}
+EXPORT_SYMBOL(tcw_get_intrg);
+
+/**
+ * tcw_get_data - return pointer to input/output data associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return the input or output data address specified in the tcw depending
+ * on whether the r-bit or the w-bit is set. If neither bit is set, return
+ * %NULL.
+ */
+void *tcw_get_data(struct tcw *tcw)
+{
+ if (tcw->r)
+ return (void *) ((addr_t) tcw->input);
+ if (tcw->w)
+ return (void *) ((addr_t) tcw->output);
+ return NULL;
+}
+EXPORT_SYMBOL(tcw_get_data);
+
+/**
+ * tcw_get_tccb - return pointer to tccb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tccb associated with this tcw.
+ */
+struct tccb *tcw_get_tccb(struct tcw *tcw)
+{
+ return (struct tccb *) ((addr_t) tcw->tccb);
+}
+EXPORT_SYMBOL(tcw_get_tccb);
+
+/**
+ * tcw_get_tsb - return pointer to tsb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tsb associated with this tcw.
+ */
+struct tsb *tcw_get_tsb(struct tcw *tcw)
+{
+ return (struct tsb *) ((addr_t) tcw->tsb);
+}
+EXPORT_SYMBOL(tcw_get_tsb);
+
+/**
+ * tcw_init - initialize tcw data structure
+ * @tcw: pointer to the tcw to be initialized
+ * @r: initial value of the r-bit
+ * @w: initial value of the w-bit
+ *
+ * Initialize all fields of the specified tcw data structure with zero and
+ * fill in the format, flags, r and w fields.
+ */
+void tcw_init(struct tcw *tcw, int r, int w)
+{
+ memset(tcw, 0, sizeof(struct tcw));
+ tcw->format = TCW_FORMAT_DEFAULT;
+ tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
+ if (r)
+ tcw->r = 1;
+ if (w)
+ tcw->w = 1;
+}
+EXPORT_SYMBOL(tcw_init);
+
+static inline size_t tca_size(struct tccb *tccb)
+{
+ return tccb->tcah.tcal - 12;
+}
+
+static u32 calc_dcw_count(struct tccb *tccb)
+{
+ int offset;
+ struct dcw *dcw;
+ u32 count = 0;
+ size_t size;
+
+ size = tca_size(tccb);
+ for (offset = 0; offset < size;) {
+ dcw = (struct dcw *) &tccb->tca[offset];
+ count += dcw->count;
+ if (!(dcw->flags & DCW_FLAGS_CC))
+ break;
+ offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
+ }
+ return count;
+}
+
+static u32 calc_cbc_size(struct tidaw *tidaw, int num)
+{
+ int i;
+ u32 cbc_data;
+ u32 cbc_count = 0;
+ u64 data_count = 0;
+
+ for (i = 0; i < num; i++) {
+ if (tidaw[i].flags & TIDAW_FLAGS_LAST)
+ break;
+ /* TODO: find out if padding applies to the total of data
+ * transferred or to the data transferred by this tidaw.
+ * Assumption: applies to the total. */
+ data_count += tidaw[i].count;
+ if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
+ cbc_data = 4 + ALIGN(data_count, 4) - data_count;
+ cbc_count += cbc_data;
+ data_count += cbc_data;
+ }
+ }
+ return cbc_count;
+}
+
+/**
+ * tcw_finalize - finalize tcw length fields and tidaw list
+ * @tcw: pointer to the tcw
+ * @num_tidaws: the number of tidaws used to address input/output data or zero
+ * if no tida is used
+ *
+ * Calculate the input-/output-count and tccbl fields in the tcw, add a
+ * tcat to the tccb and terminate the data tidaw list if used.
+ *
+ * Note: in case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void tcw_finalize(struct tcw *tcw, int num_tidaws)
+{
+ struct tidaw *tidaw;
+ struct tccb *tccb;
+ struct tccb_tcat *tcat;
+ u32 count;
+
+ /* Terminate tidaw list. */
+ tidaw = tcw_get_data(tcw);
+ if (num_tidaws > 0)
+ tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
+ /* Add tcat to tccb. */
+ tccb = tcw_get_tccb(tcw);
+ tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
+ memset(tcat, 0, sizeof(*tcat));
+ /* Calculate tcw input/output count and tcat transport count. */
+ count = calc_dcw_count(tccb);
+ if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
+ count += calc_cbc_size(tidaw, num_tidaws);
+ if (tcw->r)
+ tcw->input_count = count;
+ else if (tcw->w)
+ tcw->output_count = count;
+ tcat->count = ALIGN(count, 4) + 4;
+ /* Calculate tccbl. */
+ tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
+ sizeof(struct tccb_tcat) - 20) >> 2;
+}
+EXPORT_SYMBOL(tcw_finalize);
+
+/**
+ * tcw_set_intrg - set the interrogate tcw address of a tcw
+ * @tcw: the tcw address
+ * @intrg_tcw: the address of the interrogate tcw
+ *
+ * Set the address of the interrogate tcw in the specified tcw.
+ */
+void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
+{
+ tcw->intrg = (u32) ((addr_t) intrg_tcw);
+}
+EXPORT_SYMBOL(tcw_set_intrg);
+
+/**
+ * tcw_set_data - set data address and tida flag of a tcw
+ * @tcw: the tcw address
+ * @data: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of a tcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
+{
+ if (tcw->r) {
+ tcw->input = (u64) ((addr_t) data);
+ if (use_tidal)
+ tcw->flags |= TCW_FLAGS_INPUT_TIDA;
+ } else if (tcw->w) {
+ tcw->output = (u64) ((addr_t) data);
+ if (use_tidal)
+ tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
+ }
+}
+EXPORT_SYMBOL(tcw_set_data);
+
+/**
+ * tcw_set_tccb - set tccb address of a tcw
+ * @tcw: the tcw address
+ * @tccb: the tccb address
+ *
+ * Set the address of the tccb in the specified tcw.
+ */
+void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
+{
+ tcw->tccb = (u64) ((addr_t) tccb);
+}
+EXPORT_SYMBOL(tcw_set_tccb);
+
+/**
+ * tcw_set_tsb - set tsb address of a tcw
+ * @tcw: the tcw address
+ * @tsb: the tsb address
+ *
+ * Set the address of the tsb in the specified tcw.
+ */
+void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
+{
+ tcw->tsb = (u64) ((addr_t) tsb);
+}
+EXPORT_SYMBOL(tcw_set_tsb);
+
+/**
+ * tccb_init - initialize tccb
+ * @tccb: the tccb address
+ * @size: the maximum size of the tccb
+ * @sac: the service-action-code to be used
+ *
+ * Initialize the header of the specified tccb by resetting all values to zero
+ * and filling in defaults for format, sac and initial tcal fields.
+ */
+void tccb_init(struct tccb *tccb, size_t size, u32 sac)
+{
+ memset(tccb, 0, size);
+ tccb->tcah.format = TCCB_FORMAT_DEFAULT;
+ tccb->tcah.sac = sac;
+ tccb->tcah.tcal = 12;
+}
+EXPORT_SYMBOL(tccb_init);
+
+/**
+ * tsb_init - initialize tsb
+ * @tsb: the tsb address
+ *
+ * Initialize the specified tsb by resetting all values to zero.
+ */
+void tsb_init(struct tsb *tsb)
+{
+ memset(tsb, 0, sizeof(*tsb));
+}
+EXPORT_SYMBOL(tsb_init);
+
+/**
+ * tccb_add_dcw - add a dcw to the tccb
+ * @tccb: the tccb address
+ * @tccb_size: the maximum tccb size
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: pointer to control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified tccb by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space as defined by @tccb_size.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect added
+ * content.
+ */
+struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
+ void *cd, u8 cd_count, u32 count)
+{
+ struct dcw *dcw;
+ int size;
+ int tca_offset;
+
+ /* Check for space. */
+ tca_offset = tca_size(tccb);
+ size = ALIGN(sizeof(struct dcw) + cd_count, 4);
+ if (sizeof(struct tccb_tcah) + tca_offset + size +
+ sizeof(struct tccb_tcat) > tccb_size)
+ return ERR_PTR(-ENOSPC);
+ /* Add dcw to tca. */
+ dcw = (struct dcw *) &tccb->tca[tca_offset];
+ memset(dcw, 0, size);
+ dcw->cmd = cmd;
+ dcw->flags = flags;
+ dcw->count = count;
+ dcw->cd_count = cd_count;
+ if (cd)
+ memcpy(&dcw->cd[0], cd, cd_count);
+ tccb->tcah.tcal += size;
+ return dcw;
+}
+EXPORT_SYMBOL(tccb_add_dcw);
+
+/**
+ * tcw_add_tidaw - add a tidaw to a tcw
+ * @tcw: the tcw address
+ * @num_tidaws: the current number of tidaws
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified tcw
+ * (depending on the value of the r-flag and w-flag) and return a pointer to
+ * the new tidaw.
+ *
+ * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
+ * must ensure that there is enough space for the new tidaw. The last-tidaw
+ * flag for the last tidaw in the list will be set by tcw_finalize.
+ */
+struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
+ void *addr, u32 count)
+{
+ struct tidaw *tidaw;
+
+ /* Add tidaw to tidaw-list. */
+ tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
+ memset(tidaw, 0, sizeof(struct tidaw));
+ tidaw->flags = flags;
+ tidaw->count = count;
+ tidaw->addr = (u64) ((addr_t) addr);
+ return tidaw;
+}
+EXPORT_SYMBOL(tcw_add_tidaw);
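+
+/*
+ * Example (an illustrative sketch, not compiled code): assembling a simple
+ * read tcw by hand with the helpers above. Allocation and alignment of the
+ * tcw, tccb, tsb and tidaw buffers are assumed to be handled by the caller;
+ * itcw.c provides a helper that takes care of this automatically. The dcw
+ * command value is device specific; 0x02 is only a placeholder here.
+ *
+ * tcw_init(tcw, 1, 0);
+ * tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
+ * tcw_set_tccb(tcw, tccb);
+ * tcw_set_tsb(tcw, tsb);
+ * tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02, 0, NULL, 0, 4096);
+ * tcw_set_data(tcw, tidaws, 1);
+ * tcw_add_tidaw(tcw, 0, 0, buf, 4096);
+ * tcw_finalize(tcw, 1);
+ */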
diff --git a/kernel/drivers/s390/cio/idset.c b/kernel/drivers/s390/cio/idset.c
new file mode 100644
index 000000000..b3e06a7b9
--- /dev/null
+++ b/kernel/drivers/s390/cio/idset.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright IBM Corp. 2007, 2012
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+ int num_ssid;
+ int num_id;
+ unsigned long bitmap[0];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+ return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+ struct idset *set;
+
+ set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
+ if (set) {
+ set->num_ssid = num_ssid;
+ set->num_id = num_id;
+ memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
+ }
+ return set;
+}
+
+void idset_free(struct idset *set)
+{
+ vfree(set);
+}
+
+void idset_fill(struct idset *set)
+{
+ memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+ set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+ clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+ return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_get_first(struct idset *set, int *ssid, int *id)
+{
+ int bitnum;
+
+ bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+ if (bitnum >= set->num_ssid * set->num_id)
+ return 0;
+ *ssid = bitnum / set->num_id;
+ *id = bitnum % set->num_id;
+ return 1;
+}
+
+struct idset *idset_sch_new(void)
+{
+ return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+ idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+ idset_del(set, schid.ssid, schid.sch_no);
+}
+
+/* Clear ids starting from @schid up to end of subchannel set. */
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
+{
+ int pos = schid.ssid * set->num_id + schid.sch_no;
+
+ bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+ return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_is_empty(struct idset *set)
+{
+ return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+ int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
+
+ bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
+}
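+
+/*
+ * Usage sketch (illustrative only): callers typically build a set covering
+ * all subchannel ids, remove the ids they have seen and inspect what is
+ * left. Bit positions are computed as ssid * num_id + id, so with
+ * num_id == 65536 the id (ssid 1, sch_no 3) maps to bit 65539.
+ *
+ * struct idset *set = idset_sch_new();
+ *
+ * if (!set)
+ * return -ENOMEM;
+ * idset_fill(set);
+ * idset_sch_del(set, schid);
+ * if (!idset_is_empty(set))
+ * ...
+ * idset_free(set);
+ */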
diff --git a/kernel/drivers/s390/cio/idset.h b/kernel/drivers/s390/cio/idset.h
new file mode 100644
index 000000000..22b581046
--- /dev/null
+++ b/kernel/drivers/s390/cio/idset.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright IBM Corp. 2007, 2012
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H S390_IDSET_H
+
+#include <asm/schid.h>
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
+
+#endif /* S390_IDSET_H */
diff --git a/kernel/drivers/s390/cio/io_sch.h b/kernel/drivers/s390/cio/io_sch.h
new file mode 100644
index 000000000..b108f4a5c
--- /dev/null
+++ b/kernel/drivers/s390/cio/io_sch.h
@@ -0,0 +1,217 @@
+#ifndef S390_IO_SCH_H
+#define S390_IO_SCH_H
+
+#include <linux/types.h>
+#include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include <asm/irq.h>
+#include "css.h"
+#include "orb.h"
+
+struct io_subchannel_private {
+ union orb orb; /* operation request block */
+ struct ccw1 sense_ccw; /* static ccw for sense command */
+ struct ccw_device *cdev;/* pointer to the child ccw device */
+ struct {
+ unsigned int suspend:1; /* allow suspend */
+ unsigned int prefetch:1;/* deny prefetch */
+ unsigned int inter:1; /* suppress intermediate interrupts */
+ } __packed options;
+} __aligned(8);
+
+#define to_io_private(n) ((struct io_subchannel_private *) \
+ dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
+
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ if (priv)
+ priv->cdev = cdev;
+}
+
+#define MAX_CIWS 8
+
+/*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+ IO_DONE,
+ IO_RUNNING,
+ IO_STATUS_ERROR,
+ IO_PATH_ERROR,
+ IO_REJECTED,
+ IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ */
+struct ccw_request {
+ struct ccw1 *cp;
+ unsigned long timeout;
+ u16 maxretries;
+ u8 lpm;
+ int (*check)(struct ccw_device *, void *);
+ enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+ enum io_status);
+ void (*callback)(struct ccw_device *, void *, int);
+ void *data;
+ unsigned int singlepath:1;
+ /* These fields are used internally. */
+ unsigned int cancel:1;
+ unsigned int done:1;
+ u16 mask;
+ u16 retries;
+ int drc;
+} __attribute__((packed));
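+
+/*
+ * Illustrative sketch only (names and values are examples, not taken from
+ * a specific caller): users such as the sense-id and path-verification
+ * code fill in the public fields and leave the internal ones zeroed:
+ *
+ * struct ccw_request *req = &cdev->private->req;
+ *
+ * memset(req, 0, sizeof(*req));
+ * req->cp = cp;
+ * req->timeout = 30 * HZ;
+ * req->maxretries = 5;
+ * req->lpm = 0x80;
+ * req->callback = my_callback;
+ * req->data = my_data;
+ */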
+
+/*
+ * sense-id response buffer layout
+ */
+struct senseid {
+ /* common part */
+ u8 reserved; /* always 0xff */
+ u16 cu_type; /* control unit type */
+ u8 cu_model; /* control unit model */
+ u16 dev_type; /* device type */
+ u8 dev_model; /* device model */
+ u8 unused; /* padding byte */
+ /* extended part */
+ struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
+} __attribute__ ((packed, aligned(4)));
+
+enum cdev_todo {
+ CDEV_TODO_NOTHING,
+ CDEV_TODO_ENABLE_CMF,
+ CDEV_TODO_REBIND,
+ CDEV_TODO_REGISTER,
+ CDEV_TODO_UNREG,
+ CDEV_TODO_UNREG_EVAL,
+};
+
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
+struct ccw_device_private {
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ int state; /* device state */
+ atomic_t onoff;
+ struct ccw_dev_id dev_id; /* device id */
+ struct subchannel_id schid; /* subchannel number */
+ struct ccw_request req; /* internal I/O request */
+ int iretry;
+ u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
+ u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_noirq_mask; /* mask of paths for which no irq was
+ received */
+ u8 path_notoper_mask; /* mask of paths which were found
+ not operable */
+ u8 path_gone_mask; /* mask of paths, that became unavailable */
+ u8 path_new_mask; /* mask of paths, that became available */
+ struct {
+ unsigned int fast:1; /* post with "channel end" */
+ unsigned int repall:1; /* report every interrupt status */
+ unsigned int pgroup:1; /* do path grouping */
+ unsigned int force:1; /* allow forced online */
+ unsigned int mpath:1; /* do multipathing */
+ } __attribute__ ((packed)) options;
+ struct {
+ unsigned int esid:1; /* Ext. SenseID supported by HW */
+ unsigned int dosense:1; /* delayed SENSE required */
+ unsigned int doverify:1; /* delayed path verification */
+ unsigned int donotify:1; /* call notify function */
+ unsigned int recog_done:1; /* dev. recog. complete */
+ unsigned int fake_irb:2; /* deliver faked irb */
+ unsigned int resuming:1; /* recognition while resume */
+ unsigned int pgroup:1; /* pathgroup is set up */
+ unsigned int mpath:1; /* multipathing is set up */
+ unsigned int pgid_unknown:1;/* unknown pgid state */
+ unsigned int initialized:1; /* set if initial reference held */
+ } __attribute__((packed)) flags;
+ unsigned long intparm; /* user interruption parameter */
+ struct qdio_irq *qdio_data;
+ struct irb irb; /* device status */
+ struct senseid senseid; /* SenseID info */
+ struct pgid pgid[8]; /* path group IDs per chpid*/
+ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct work_struct todo_work;
+ enum cdev_todo todo;
+ wait_queue_head_t wait_q;
+ struct timer_list timer;
+ void *cmb; /* measurement information */
+ struct list_head cmb_list; /* list of measured devices */
+ u64 cmb_start_time; /* clock value of cmb reset */
+ void *cmb_wait; /* deferred cmb enable/disable */
+ enum interruption_class int_class;
+};
+
+static inline int rsch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " rsch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc", "memory");
+ return ccode;
+}
+
+static inline int hsch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " hsch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
+static inline int xsch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " .insn rre,0xb2760000,%1,0\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
+#endif
diff --git a/kernel/drivers/s390/cio/ioasm.h b/kernel/drivers/s390/cio/ioasm.h
new file mode 100644
index 000000000..4d80fc67a
--- /dev/null
+++ b/kernel/drivers/s390/cio/ioasm.h
@@ -0,0 +1,167 @@
+#ifndef S390_CIO_IOASM_H
+#define S390_CIO_IOASM_H
+
+#include <asm/chpid.h>
+#include <asm/schid.h>
+#include "orb.h"
+#include "cio.h"
+
+/*
+ * TPI info structure
+ */
+struct tpi_info {
+ struct subchannel_id schid;
+ __u32 intparm; /* interruption parameter */
+ __u32 adapter_IO : 1;
+ __u32 reserved2 : 1;
+ __u32 isc : 3;
+ __u32 reserved3 : 12;
+ __u32 int_type : 3;
+ __u32 reserved4 : 12;
+} __attribute__ ((packed));
+
+
+/*
+ * Some S390 specific IO instructions as inline
+ */
+
+static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
+{
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " stsch 0(%3)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (ccode), "=m" (*addr)
+ : "d" (reg1), "a" (addr)
+ : "cc");
+ return ccode;
+}
+
+static inline int msch(struct subchannel_id schid, struct schib *addr)
+{
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " msch 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc");
+ return ccode;
+}
+
+static inline int msch_err(struct subchannel_id schid, struct schib *addr)
+{
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " msch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc");
+ return ccode;
+}
+
+static inline int tsch(struct subchannel_id schid, struct irb *addr)
+{
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " tsch 0(%3)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode), "=m" (*addr)
+ : "d" (reg1), "a" (addr)
+ : "cc");
+ return ccode;
+}
+
+static inline int ssch(struct subchannel_id schid, union orb *addr)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " ssch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc", "memory");
+ return ccode;
+}
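+
+/*
+ * Note on the inline assemblies in this file: the condition code set by
+ * the instruction is extracted via ipm/srl and returned to the caller.
+ * For ssch, cc 0 means the start function was initiated, cc 1 status
+ * pending, cc 2 busy and cc 3 not operational; the exact meaning of each
+ * cc value is instruction specific (see the Principles of Operation).
+ * The *_err variants additionally return -EIO, via the exception table,
+ * if the instruction itself faults.
+ */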
+
+static inline int csch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " csch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
+static inline int tpi(struct tpi_info *addr)
+{
+ int ccode;
+
+ asm volatile(
+ " tpi 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode), "=m" (*addr)
+ : "a" (addr)
+ : "cc");
+ return ccode;
+}
+
+static inline int chsc(void *chsc_area)
+{
+ typedef struct { char _[4096]; } addr_type;
+ int cc;
+
+ asm volatile(
+ " .insn rre,0xb25f0000,%2,0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "=m" (*(addr_type *) chsc_area)
+ : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
+ : "cc");
+ return cc;
+}
+
+static inline int rchp(struct chp_id chpid)
+{
+ register struct chp_id reg1 asm ("1") = chpid;
+ int ccode;
+
+ asm volatile(
+ " lr 1,%1\n"
+ " rchp\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode) : "d" (reg1) : "cc");
+ return ccode;
+}
+
+#endif
diff --git a/kernel/drivers/s390/cio/isc.c b/kernel/drivers/s390/cio/isc.c
new file mode 100644
index 000000000..c592087be
--- /dev/null
+++ b/kernel/drivers/s390/cio/isc.c
@@ -0,0 +1,68 @@
+/*
+ * Functions for registration of I/O interruption subclasses on s390.
+ *
+ * Copyright IBM Corp. 2008
+ * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/isc.h>
+
+static unsigned int isc_refs[MAX_ISC + 1];
+static DEFINE_SPINLOCK(isc_ref_lock);
+
+
+/**
+ * isc_register - register an I/O interruption subclass.
+ * @isc: I/O interruption subclass to register
+ *
+ * The number of users for @isc is increased. If this is the first user to
+ * register @isc, the corresponding I/O interruption subclass mask is enabled.
+ *
+ * Context:
+ * This function must not be called in interrupt context.
+ */
+void isc_register(unsigned int isc)
+{
+ if (isc > MAX_ISC) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock(&isc_ref_lock);
+ if (isc_refs[isc] == 0)
+ ctl_set_bit(6, 31 - isc);
+ isc_refs[isc]++;
+ spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_register);
+
+/**
+ * isc_unregister - unregister an I/O interruption subclass.
+ * @isc: I/O interruption subclass to unregister
+ *
+ * The number of users for @isc is decreased. If this is the last user to
+ * unregister @isc, the corresponding I/O interruption subclass mask is
+ * disabled.
+ * Note: This function must not be called if isc_register() hasn't been called
+ * before by the driver for @isc.
+ *
+ * Context:
+ * This function must not be called in interrupt context.
+ */
+void isc_unregister(unsigned int isc)
+{
+ spin_lock(&isc_ref_lock);
+ /* check for misuse */
+ if (isc > MAX_ISC || isc_refs[isc] == 0) {
+ WARN_ON(1);
+ goto out_unlock;
+ }
+ if (isc_refs[isc] == 1)
+ ctl_clear_bit(6, 31 - isc);
+ isc_refs[isc]--;
+out_unlock:
+ spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_unregister);
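+
+/*
+ * Usage sketch (illustrative only, MY_ISC is a placeholder): a subchannel
+ * driver registers its isc once at init time and unregisters it again on
+ * exit or on a failed init, as eadm_sch.c does for EADM_SCH_ISC:
+ *
+ * isc_register(MY_ISC);
+ * ret = my_driver_setup();
+ * if (ret)
+ * isc_unregister(MY_ISC);
+ */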
diff --git a/kernel/drivers/s390/cio/itcw.c b/kernel/drivers/s390/cio/itcw.c
new file mode 100644
index 000000000..358ee16d1
--- /dev/null
+++ b/kernel/drivers/s390/cio/itcw.c
@@ -0,0 +1,369 @@
+/*
+ * Functions for incremental construction of fcx enabled I/O control blocks.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include <asm/itcw.h>
+
+/**
+ * struct itcw - incremental tcw helper data type
+ *
+ * This structure serves as a handle for the incremental construction of a
+ * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
+ * tcw and associated data. The data structures are contained inside a single
+ * contiguous buffer provided by the user.
+ *
+ * The itcw construction functions take care of overall data integrity:
+ * - reset unused fields to zero
+ * - fill in required pointers
+ * - ensure required alignment for data structures
+ * - prevent data structures to cross 4k-byte boundary where required
+ * - calculate tccb-related length fields
+ * - optionally provide ready-made interrogate tcw and associated structures
+ *
+ * Restrictions apply to the itcws created with these construction functions:
+ * - tida only supported for data address, not for tccb
+ * - only contiguous tidaw-lists (no ttic)
+ * - total number of bytes required per itcw may not exceed 4k bytes
+ * - either read or write operation (may not work with r=0 and w=0)
+ *
+ * Example:
+ * struct itcw *itcw;
+ * void *buffer;
+ * size_t size;
+ *
+ * size = itcw_calc_size(1, 2, 0);
+ * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ * if (!buffer)
+ * return -ENOMEM;
+ * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
+ * if (IS_ERR(itcw))
+ * return PTR_ERR(itcw);
+ * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
+ * itcw_add_tidaw(itcw, 0, 0x30000, 20);
+ * itcw_add_tidaw(itcw, 0, 0x40000, 52);
+ * itcw_finalize(itcw);
+ *
+ */
+struct itcw {
+ struct tcw *tcw;
+ struct tcw *intrg_tcw;
+ int num_tidaws;
+ int max_tidaws;
+ int intrg_num_tidaws;
+ int intrg_max_tidaws;
+};
+
+/**
+ * itcw_get_tcw - return pointer to tcw associated with the itcw
+ * @itcw: address of the itcw
+ *
+ * Return pointer to the tcw associated with the itcw.
+ */
+struct tcw *itcw_get_tcw(struct itcw *itcw)
+{
+ return itcw->tcw;
+}
+EXPORT_SYMBOL(itcw_get_tcw);
+
+/**
+ * itcw_calc_size - return the size of an itcw with the given parameters
+ * @intrg: if non-zero, add an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Calculate and return the number of bytes required to hold an itcw with the
+ * given parameters and assuming tccbs with maximum size.
+ *
+ * Note that the resulting size also contains bytes needed for alignment
+ * padding as well as padding to ensure that data structures don't cross a
+ * 4k-boundary where required.
+ */
+size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
+{
+ size_t len;
+ int cross_count;
+
+ /* Main data. */
+ len = sizeof(struct itcw);
+ len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+ /* TSB */ sizeof(struct tsb) +
+ /* TIDAL */ max_tidaws * sizeof(struct tidaw);
+ /* Interrogate data. */
+ if (intrg) {
+ len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+ /* TSB */ sizeof(struct tsb) +
+ /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
+ }
+
+ /* Maximum required alignment padding. */
+ len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
+
+ /* TIDAW lists may not cross a 4k boundary. To cross a
+ * boundary we need to add a TTIC TIDAW. We need to reserve
+ * one additional TIDAW for a TTIC that we may need to add due
+ * to the placement of the data chunk in memory, and a further
+ * TIDAW for each page boundary that the TIDAW list may cross
+ * due to its own size.
+ */
+ if (max_tidaws) {
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
+ if (intrg_max_tidaws) {
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
+ return len;
+}
+EXPORT_SYMBOL(itcw_calc_size);
+
+#define CROSS4K(x, l) (((x) & ~4095) != (((x) + (l)) & ~4095))
+
+static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
+ int align, int check_4k)
+{
+ addr_t addr;
+
+ addr = ALIGN(*start, align);
+ if (check_4k && CROSS4K(addr, len)) {
+ addr = ALIGN(addr, 4096);
+ addr = ALIGN(addr, align);
+ }
+ if (addr + len > end)
+ return ERR_PTR(-ENOSPC);
+ *start = addr + len;
+ return (void *) addr;
+}
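+
+/*
+ * Worked example (assumed values): with *start = 0x1ff0, len = 0x40 and
+ * align = 16, the aligned address 0x1ff0 would span the 4k boundary at
+ * 0x2000 (0x1ff0 & ~4095 is 0x1000 while 0x2030 & ~4095 is 0x2000), so
+ * with check_4k set the chunk is moved up to 0x2000 and *start becomes
+ * 0x2040.
+ */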
+
+/**
+ * itcw_init - initialize incremental tcw data structure
+ * @buffer: address of buffer to use for data structures
+ * @size: number of bytes in buffer
+ * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
+ * operation tcw
+ * @intrg: if non-zero, add and initialize an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Prepare the specified buffer to be used as an incremental tcw, i.e. a
+ * helper data structure that can be used to construct a valid tcw by
+ * successive calls to other helper functions. Note: the buffer needs to be
+ * located below the 2G address limit. The resulting tcw has the following
+ * restrictions:
+ * - no tccb tidal
+ * - input/output tidal is contiguous (no ttic)
+ * - total data should not exceed 4k
+ * - tcw specifies either read or write operation
+ *
+ * On success, return pointer to the resulting incremental tcw data structure,
+ * ERR_PTR otherwise.
+ */
+struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
+ int max_tidaws, int intrg_max_tidaws)
+{
+ struct itcw *itcw;
+ void *chunk;
+ addr_t start;
+ addr_t end;
+ int cross_count;
+
+ /* Check for 2G limit. */
+ start = (addr_t) buffer;
+ end = start + size;
+ if (end > (1 << 31))
+ return ERR_PTR(-EINVAL);
+ memset(buffer, 0, size);
+ /* ITCW. */
+ chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ itcw = chunk;
+ /* allow for TTIC tidaws that may be needed to cross a page boundary */
+ cross_count = 0;
+ if (max_tidaws)
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->max_tidaws = max_tidaws + cross_count;
+ cross_count = 0;
+ if (intrg_max_tidaws)
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
+ /* Main TCW. */
+ chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ itcw->tcw = chunk;
+ tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
+ (op == ITCW_OP_WRITE) ? 1 : 0);
+ /* Interrogate TCW. */
+ if (intrg) {
+ chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ itcw->intrg_tcw = chunk;
+ tcw_init(itcw->intrg_tcw, 1, 0);
+ tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
+ }
+ /* Data TIDAL. */
+ if (max_tidaws > 0) {
+ chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+ itcw->max_tidaws, 16, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tcw_set_data(itcw->tcw, chunk, 1);
+ }
+ /* Interrogate data TIDAL. */
+ if (intrg && (intrg_max_tidaws > 0)) {
+ chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+ itcw->intrg_max_tidaws, 16, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tcw_set_data(itcw->intrg_tcw, chunk, 1);
+ }
+ /* TSB. */
+ chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tsb_init(chunk);
+ tcw_set_tsb(itcw->tcw, chunk);
+ /* Interrogate TSB. */
+ if (intrg) {
+ chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tsb_init(chunk);
+ tcw_set_tsb(itcw->intrg_tcw, chunk);
+ }
+ /* TCCB. */
+ chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
+ tcw_set_tccb(itcw->tcw, chunk);
+ /* Interrogate TCCB. */
+ if (intrg) {
+ chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
+ tcw_set_tccb(itcw->intrg_tcw, chunk);
+ tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
+ sizeof(struct dcw_intrg_data), 0);
+ tcw_finalize(itcw->intrg_tcw, 0);
+ }
+ return itcw;
+}
+EXPORT_SYMBOL(itcw_init);
+
+/**
+ * itcw_add_dcw - add a dcw to the itcw
+ * @itcw: address of the itcw
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: address of control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified itcw by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect added
+ * content.
+ */
+struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
+ u8 cd_count, u32 count)
+{
+ return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
+ flags, cd, cd_count, count);
+}
+EXPORT_SYMBOL(itcw_add_dcw);
+
+/**
+ * itcw_add_tidaw - add a tidaw to the itcw
+ * @itcw: address of the itcw
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified itcw
+ * (depending on the value of the r-flag and w-flag). Return a pointer to
+ * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
+ * available space.
+ *
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
+ */
+struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
+{
+ struct tidaw *following;
+
+ if (itcw->num_tidaws >= itcw->max_tidaws)
+ return ERR_PTR(-ENOSPC);
+ /*
+ * Is the tidaw, which follows the one we are about to fill, on the next
+ * page? Then we have to insert a TTIC tidaw first, that points to the
+ * tidaw on the new page.
+ */
+ following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+ + itcw->num_tidaws + 1;
+ if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+ tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+ TIDAW_FLAGS_TTIC, following, 0);
+ if (itcw->num_tidaws >= itcw->max_tidaws)
+ return ERR_PTR(-ENOSPC);
+ }
+ return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
+}
+EXPORT_SYMBOL(itcw_add_tidaw);
+
+/**
+ * itcw_set_data - set data address and tida flag of the itcw
+ * @itcw: address of the itcw
+ * @addr: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of the itcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
+{
+ tcw_set_data(itcw->tcw, addr, use_tidal);
+}
+EXPORT_SYMBOL(itcw_set_data);
+
+/**
+ * itcw_finalize - calculate length and count fields of the itcw
+ * @itcw: address of the itcw
+ *
+ * Calculate tcw input-/output-count and tccbl fields and add a tcat to the
+ * tccb. In case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void itcw_finalize(struct itcw *itcw)
+{
+ tcw_finalize(itcw->tcw, itcw->num_tidaws);
+}
+EXPORT_SYMBOL(itcw_finalize);
diff --git a/kernel/drivers/s390/cio/orb.h b/kernel/drivers/s390/cio/orb.h
new file mode 100644
index 000000000..7a640530e
--- /dev/null
+++ b/kernel/drivers/s390/cio/orb.h
@@ -0,0 +1,91 @@
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+ u32 intparm; /* interruption parameter */
+ u32 key:4; /* flags, like key, suspend control, etc. */
+ u32 spnd:1; /* suspend control */
+ u32 res1:1; /* reserved */
+ u32 mod:1; /* modification control */
+ u32 sync:1; /* synchronize control */
+ u32 fmt:1; /* format control */
+ u32 pfch:1; /* prefetch control */
+ u32 isic:1; /* initial-status-interruption control */
+ u32 alcc:1; /* address-limit-checking control */
+ u32 ssic:1; /* suppress-suspended-interr. control */
+ u32 res2:1; /* reserved */
+ u32 c64:1; /* IDAW/QDIO 64 bit control */
+ u32 i2k:1; /* IDAW 2/4kB block size control */
+ u32 lpm:8; /* logical path mask */
+ u32 ils:1; /* incorrect length */
+ u32 zero:6; /* reserved zeros */
+ u32 orbx:1; /* ORB extension control */
+ u32 cpa; /* channel program address */
+} __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:9;
+ u32 b:1;
+ u32:2;
+ u32 lpm:8;
+ u32:7;
+ u32 x:1;
+ u32 tcw;
+ u32 prio:8;
+ u32:8;
+ u32 rsvpgm:8;
+ u32:8;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+/*
+ * eadm operation request block
+ */
+struct eadm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:4;
+ u32 compat1:1;
+ u32 compat2:1;
+ u32:21;
+ u32 x:1;
+ u32 aob;
+ u32 css_prio:8;
+ u32:8;
+ u32 scm_prio:8;
+ u32:8;
+ u32:29;
+ u32 fmt:3;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+union orb {
+ struct cmd_orb cmd;
+ struct tm_orb tm;
+ struct eadm_orb eadm;
+} __packed __aligned(4);
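+
+/*
+ * Illustrative sketch only (see the users of union orb for the real setup
+ * code): starting a command-mode channel program means clearing the orb,
+ * filling in the cmd variant and handing it to ssch:
+ *
+ * memset(orb, 0, sizeof(union orb));
+ * orb->cmd.intparm = intparm;
+ * orb->cmd.fmt = 1;
+ * orb->cmd.lpm = lpm;
+ * orb->cmd.cpa = (u32) __pa(cpa);
+ * ccode = ssch(schid, orb);
+ */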
+
+#endif /* S390_ORB_H */
diff --git a/kernel/drivers/s390/cio/qdio.h b/kernel/drivers/s390/cio/qdio.h
new file mode 100644
index 000000000..7e70f9298
--- /dev/null
+++ b/kernel/drivers/s390/cio/qdio.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#ifndef _CIO_QDIO_H
+#define _CIO_QDIO_H
+
+#include <asm/page.h>
+#include <asm/schid.h>
+#include <asm/debug.h>
+#include "chsc.h"
+
+#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
+#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
+
+enum qdio_irq_states {
+ QDIO_IRQ_STATE_INACTIVE,
+ QDIO_IRQ_STATE_ESTABLISHED,
+ QDIO_IRQ_STATE_ACTIVE,
+ QDIO_IRQ_STATE_STOPPED,
+ QDIO_IRQ_STATE_CLEANUP,
+ QDIO_IRQ_STATE_ERR,
+ NR_QDIO_IRQ_STATES,
+};
+
+/* used as intparm in do_IO */
+#define QDIO_DOING_ESTABLISH 1
+#define QDIO_DOING_ACTIVATE 2
+#define QDIO_DOING_CLEANUP 3
+
+#define SLSB_STATE_NOT_INIT 0x0
+#define SLSB_STATE_EMPTY 0x1
+#define SLSB_STATE_PRIMED 0x2
+#define SLSB_STATE_PENDING 0x3
+#define SLSB_STATE_HALTED 0xe
+#define SLSB_STATE_ERROR 0xf
+#define SLSB_TYPE_INPUT 0x0
+#define SLSB_TYPE_OUTPUT 0x20
+#define SLSB_OWNER_PROG 0x80
+#define SLSB_OWNER_CU 0x40
+
+#define SLSB_P_INPUT_NOT_INIT \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */
+#define SLSB_P_INPUT_ACK \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */
+#define SLSB_CU_INPUT_EMPTY \
+ (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */
+#define SLSB_P_INPUT_PRIMED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */
+#define SLSB_P_INPUT_HALTED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */
+#define SLSB_P_INPUT_ERROR \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */
+#define SLSB_P_OUTPUT_NOT_INIT \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
+#define SLSB_P_OUTPUT_EMPTY \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */
+#define SLSB_CU_OUTPUT_PRIMED \
+ (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
+#define SLSB_P_OUTPUT_HALTED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */
+#define SLSB_P_OUTPUT_ERROR \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */
+
+#define SLSB_ERROR_DURING_LOOKUP 0xff
+
+/* additional CIWs returned by extended Sense-ID */
+#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
+#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
+
+/* flags for st qdio sch data */
+#define CHSC_FLAG_QDIO_CAPABILITY 0x80
+#define CHSC_FLAG_VALIDITY 0x40
+
+/* SIGA flags */
+#define QDIO_SIGA_WRITE 0x00
+#define QDIO_SIGA_READ 0x01
+#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEQ 0x04
+#define QDIO_SIGA_QEBSM_FLAG 0x80
+
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+ int *start, int *count)
+{
+ register unsigned long _ccq asm ("0") = *count;
+ register unsigned long _token asm ("1") = token;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+
+ asm volatile(
+ " .insn rsy,0xeb000000008A,%1,0,0(%2)"
+ : "+d" (_ccq), "+d" (_queuestart)
+ : "d" ((unsigned long)state), "d" (_token)
+ : "memory", "cc");
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+}
+
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+ int *start, int *count, int ack)
+{
+ register unsigned long _ccq asm ("0") = *count;
+ register unsigned long _token asm ("1") = token;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _state = (unsigned long)ack << 63;
+
+ asm volatile(
+ " .insn rrf,0xB99c0000,%1,%2,0,0"
+ : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+ : "d" (_token)
+ : "memory", "cc");
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+ *state = _state & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+}
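+
+/*
+ * Note on the SQBS/EQBS helpers above: queue number and start index share
+ * one register (queue in the upper, start in the lower 32 bits), and the
+ * count shares register 0 with the condition-code qualifier. On return the
+ * low byte of _queuestart holds the updated start index, the low byte of
+ * _ccq the number of buffers not yet processed, and the byte extracted by
+ * "_ccq >> 32" the ccq, which the callers in qdio_main.c use to decide
+ * whether to retry.
+ */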
+
+struct qdio_irq;
+
+struct siga_flag {
+ u8 input:1;
+ u8 output:1;
+ u8 sync:1;
+ u8 sync_after_ai:1;
+ u8 sync_out_after_pci:1;
+ u8:3;
+} __attribute__ ((packed));
+
+struct qdio_dev_perf_stat {
+ unsigned int adapter_int;
+ unsigned int qdio_int;
+ unsigned int pci_request_int;
+
+ unsigned int tasklet_inbound;
+ unsigned int tasklet_inbound_resched;
+ unsigned int tasklet_inbound_resched2;
+ unsigned int tasklet_outbound;
+
+ unsigned int siga_read;
+ unsigned int siga_write;
+ unsigned int siga_sync;
+
+ unsigned int inbound_call;
+ unsigned int inbound_handler;
+ unsigned int stop_polling;
+ unsigned int inbound_queue_full;
+ unsigned int outbound_call;
+ unsigned int outbound_handler;
+ unsigned int outbound_queue_full;
+ unsigned int fast_requeue;
+ unsigned int target_full;
+ unsigned int eqbs;
+ unsigned int eqbs_partial;
+ unsigned int sqbs;
+ unsigned int sqbs_partial;
+ unsigned int int_discarded;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+ /*
+ * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
+ * Since at most 127 SBALs are scanned, reuse the entry for 128 to mean
+ * queue full, i.e. 127 SBALs found.
+ */
+ unsigned int nr_sbals[8];
+ unsigned int nr_sbal_error;
+ unsigned int nr_sbal_nop;
+ unsigned int nr_sbal_total;
+};
+
+enum qdio_queue_irq_states {
+ QDIO_QUEUE_IRQS_DISABLED,
+};
+
+struct qdio_input_q {
+ /* input buffer acknowledgement flag */
+ int polling;
+ /* first ACK'ed buffer */
+ int ack_start;
+ /* how many SBALs are acknowledged with qebsm */
+ int ack_count;
+ /* last time of noticing incoming data */
+ u64 timestamp;
+ /* upper-layer polling flag */
+ unsigned long queue_irq_state;
+ /* callback to start upper-layer polling */
+ void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+};
+
+struct qdio_output_q {
+ /* PCIs are enabled for the queue */
+ int pci_out_enabled;
+ /* cq: use asynchronous output buffers */
+ int use_cq;
+ /* cq: aobs used for a particular SBAL */
+ struct qaob **aobs;
+ /* cq: sbal state related to asynchronous operation */
+ struct qdio_outbuf_state *sbal_state;
+ /* timer to check for more outbound work */
+ struct timer_list timer;
+ /* used SBALs before tasklet schedule */
+ int scan_threshold;
+};
+
+/*
+ * Note on cache alignment: grouped slsb and write-mostly data go at the
+ * beginning; sbal[] is read-only, starts on a new cacheline and is followed
+ * by read-mostly data.
+ */
+struct qdio_q {
+ struct slsb slsb;
+
+ union {
+ struct qdio_input_q in;
+ struct qdio_output_q out;
+ } u;
+
+ /*
+ * inbound: next buffer the program should check for
+ * outbound: next buffer to check if adapter processed it
+ */
+ int first_to_check;
+
+ /* first_to_check of the last time */
+ int last_move;
+
+ /* beginning position for calling the program */
+ int first_to_kick;
+
+ /* number of buffers in use by the adapter */
+ atomic_t nr_buf_used;
+
+ /* error condition during a data transfer */
+ unsigned int qdio_error;
+
+ /* last scan of the queue */
+ u64 timestamp;
+
+ struct tasklet_struct tasklet;
+ struct qdio_queue_perf_stat q_stats;
+
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+ /* queue number */
+ int nr;
+
+ /* bitmask of queue number */
+ int mask;
+
+ /* input or output queue */
+ int is_input_q;
+
+ /* list of thinint input queues */
+ struct list_head entry;
+
+ /* upper-layer program handler */
+ qdio_handler_t (*handler);
+
+ struct dentry *debugfs_q;
+ struct qdio_irq *irq_ptr;
+ struct sl *sl;
+ /*
+ * A page is allocated under this pointer and used for slib and sl.
+ * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
+ */
+ struct slib *slib;
+} __attribute__ ((aligned(256)));
+
+struct qdio_irq {
+ struct qib qib;
+ u32 *dsci; /* address of device state change indicator */
+ struct ccw_device *cdev;
+ struct dentry *debugfs_dev;
+ struct dentry *debugfs_perf;
+
+ unsigned long int_parm;
+ struct subchannel_id schid;
+ unsigned long sch_token; /* QEBSM facility */
+
+ enum qdio_irq_states state;
+
+ struct siga_flag siga_flag; /* siga sync information from qdioac */
+
+ int nr_input_qs;
+ int nr_output_qs;
+
+ struct ccw1 ccw;
+ struct ciw equeue;
+ struct ciw aqueue;
+
+ struct qdio_ssqd_desc ssqd_desc;
+ void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+
+ int perf_stat_enabled;
+
+ struct qdr *qdr;
+ unsigned long chsc_page;
+
+ struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+
+ debug_info_t *debug_area;
+ struct mutex setup_mutex;
+ struct qdio_dev_perf_stat perf_stat;
+};
+
+/* helper functions */
+#define queue_type(q) ((q)->irq_ptr->qib.qfmt)
+#define SCH_NO(q) ((q)->irq_ptr->schid.sch_no)
+
+#define is_thinint_irq(irq) \
+ (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
+ css_general_characteristics.aif_osa)
+
+#define qperf(__qdev, __attr) ((__qdev)->perf_stat.__attr)
+
+#define qperf_inc(__q, __attr) \
+({ \
+ struct qdio_irq *qdev = (__q)->irq_ptr; \
+ if (qdev->perf_stat_enabled) \
+ (qdev->perf_stat.__attr)++; \
+})
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+ q->q_stats.nr_sbal_error += count;
+ q->q_stats.nr_sbal_total += count;
+}
+
+/* the highest iqdio queue is used for multicast */
+static inline int multicast_outbound(struct qdio_q *q)
+{
+ return (q->irq_ptr->nr_output_qs > 1) &&
+ (q->nr == q->irq_ptr->nr_output_qs - 1);
+}
+
+#define pci_out_supported(q) \
+ (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
+
+#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
+#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
+#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
+
+#define prev_buf(bufnr) \
+ ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
+#define next_buf(bufnr) \
+ ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc) \
+ ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
+#define sub_buf(bufnr, dec) \
+ ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
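+
+/*
+ * The buffer arithmetic above is modulo the ring size: with
+ * QDIO_MAX_BUFFERS_MASK == 127, next_buf(127) == 0, prev_buf(0) == 127
+ * and add_buf(120, 10) == 2.
+ */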
+
+#define queue_irqs_enabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q) \
+ (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
+
+extern u64 last_ai_time;
+
+/* prototypes for thin interrupt */
+void qdio_setup_thinint(struct qdio_irq *irq_ptr);
+int qdio_establish_thinint(struct qdio_irq *irq_ptr);
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_inbound_processing(unsigned long q);
+int tiqdio_allocate_memory(void);
+void tiqdio_free_memory(void);
+int tiqdio_register_thinints(void);
+void tiqdio_unregister_thinints(void);
+void clear_nonshared_ind(struct qdio_irq *);
+int test_nonshared_ind(struct qdio_irq *);
+
+/* prototypes for setup */
+void qdio_inbound_processing(unsigned long data);
+void qdio_outbound_processing(unsigned long data);
+void qdio_outbound_timer(unsigned long data);
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb);
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
+ int nr_output_qs);
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+ struct subchannel_id *schid,
+ struct qdio_ssqd_desc *data);
+int qdio_setup_irq(struct qdio_initialize *init_data);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev);
+void qdio_release_memory(struct qdio_irq *irq_ptr);
+int qdio_setup_create_sysfs(struct ccw_device *cdev);
+void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
+int qdio_setup_init(void);
+void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state);
+#endif /* _CIO_QDIO_H */
diff --git a/kernel/drivers/s390/cio/qdio_debug.c b/kernel/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 000000000..f1f3baa8e
--- /dev/null
+++ b/kernel/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,373 @@
+/*
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/debug.h>
+#include "qdio_debug.h"
+#include "qdio.h"
+
+debug_info_t *qdio_dbf_setup;
+debug_info_t *qdio_dbf_error;
+
+static struct dentry *debugfs_root;
+#define QDIO_DEBUGFS_NAME_LEN 10
+#define QDIO_DBF_NAME_LEN 20
+
+struct qdio_dbf_entry {
+ char dbf_name[QDIO_DBF_NAME_LEN];
+ debug_info_t *dbf_info;
+ struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+ struct qdio_dbf_entry *entry;
+ debug_info_t *rc = NULL;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+ if (strcmp(entry->dbf_name, name) == 0) {
+ rc = entry->dbf_info;
+ break;
+ }
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+ return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+ struct qdio_dbf_entry *entry, *tmp;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+ list_del(&entry->dbf_list);
+ debug_unregister(entry->dbf_info);
+ kfree(entry);
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
+ struct qdio_irq *irq_ptr)
+{
+ char text[QDIO_DBF_NAME_LEN];
+ struct qdio_dbf_entry *new_entry;
+
+ DBF_EVENT("qfmt:%1d", init_data->q_format);
+ DBF_HEX(init_data->adapter_name, 8);
+ DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
+ DBF_HEX(&init_data->qib_param_field, sizeof(void *));
+ DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
+ DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
+ DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
+ init_data->no_output_qs);
+ DBF_HEX(&init_data->input_handler, sizeof(void *));
+ DBF_HEX(&init_data->output_handler, sizeof(void *));
+ DBF_HEX(&init_data->int_parm, sizeof(long));
+ DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
+ DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
+ DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
+
+ /* allocate trace view for the interface */
+ snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+ dev_name(&init_data->cdev->dev));
+ irq_ptr->debug_area = qdio_get_dbf_entry(text);
+ if (irq_ptr->debug_area)
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+ else {
+ irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+ if (!irq_ptr->debug_area)
+ return -ENOMEM;
+ if (debug_register_view(irq_ptr->debug_area,
+ &debug_hex_ascii_view)) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ debug_set_level(irq_ptr->debug_area, DBF_WARN);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+ if (!new_entry) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ new_entry->dbf_info = irq_ptr->debug_area;
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_add(&new_entry->dbf_list, &qdio_dbf_list);
+ mutex_unlock(&qdio_dbf_list_mutex);
+ }
+ return 0;
+}
+
+static int qstat_show(struct seq_file *m, void *v)
+{
+ unsigned char state;
+ struct qdio_q *q = m->private;
+ int i;
+
+ if (!q)
+ return 0;
+
+ seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
+ q->timestamp, last_ai_time);
+ seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n",
+ atomic_read(&q->nr_buf_used),
+ q->first_to_check, q->last_move);
+ if (q->is_input_q) {
+ seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+ q->u.in.polling, q->u.in.ack_start,
+ q->u.in.ack_count);
+ seq_printf(m, "DSCI: %d IRQs disabled: %u\n",
+ *(u32 *)q->irq_ptr->dsci,
+ test_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state));
+ }
+ seq_printf(m, "SBAL states:\n");
+ seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+ debug_get_buf_state(q, i, &state);
+ switch (state) {
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_OUTPUT_NOT_INIT:
+ seq_printf(m, "N");
+ break;
+ case SLSB_P_OUTPUT_PENDING:
+ seq_printf(m, "P");
+ break;
+ case SLSB_P_INPUT_PRIMED:
+ case SLSB_CU_OUTPUT_PRIMED:
+ seq_printf(m, "+");
+ break;
+ case SLSB_P_INPUT_ACK:
+ seq_printf(m, "A");
+ break;
+ case SLSB_P_INPUT_ERROR:
+ case SLSB_P_OUTPUT_ERROR:
+ seq_printf(m, "x");
+ break;
+ case SLSB_CU_INPUT_EMPTY:
+ case SLSB_P_OUTPUT_EMPTY:
+ seq_printf(m, "-");
+ break;
+ case SLSB_P_INPUT_HALTED:
+ case SLSB_P_OUTPUT_HALTED:
+ seq_printf(m, ".");
+ break;
+ default:
+ seq_printf(m, "?");
+ }
+ if (i == 63)
+ seq_printf(m, "\n");
+ }
+ seq_printf(m, "\n");
+ seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
+
+ seq_printf(m, "\nSBAL statistics:");
+ if (!q->irq_ptr->perf_stat_enabled) {
+ seq_printf(m, " disabled\n");
+ return 0;
+ }
+
+ seq_printf(m, "\n1 2.. 4.. 8.. "
+ "16.. 32.. 64.. 127\n");
+ for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+ seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+ seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
+ q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+ q->q_stats.nr_sbal_total);
+ return 0;
+}
+
+static int qstat_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qstat_show,
+ file_inode(filp)->i_private);
+}
+
+static const struct file_operations debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qstat_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static char *qperf_names[] = {
+ "Assumed adapter interrupts",
+ "QDIO interrupts",
+ "Requested PCIs",
+ "Inbound tasklet runs",
+ "Inbound tasklet resched",
+ "Inbound tasklet resched2",
+ "Outbound tasklet runs",
+ "SIGA read",
+ "SIGA write",
+ "SIGA sync",
+ "Inbound calls",
+ "Inbound handler",
+ "Inbound stop_polling",
+ "Inbound queue full",
+ "Outbound calls",
+ "Outbound handler",
+ "Outbound queue full",
+ "Outbound fast_requeue",
+ "Outbound target_full",
+ "QEBSM eqbs",
+ "QEBSM eqbs partial",
+ "QEBSM sqbs",
+ "QEBSM sqbs partial",
+ "Discarded interrupts"
+};
+
+static int qperf_show(struct seq_file *m, void *v)
+{
+ struct qdio_irq *irq_ptr = m->private;
+ unsigned int *stat;
+ int i;
+
+ if (!irq_ptr)
+ return 0;
+ if (!irq_ptr->perf_stat_enabled) {
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ stat = (unsigned int *)&irq_ptr->perf_stat;
+
+ for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+ seq_printf(m, "%26s:\t%u\n",
+ qperf_names[i], *(stat + i));
+ return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct qdio_irq *irq_ptr = seq->private;
+ struct qdio_q *q;
+ unsigned long val;
+ int ret, i;
+
+ if (!irq_ptr)
+ return 0;
+
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ irq_ptr->perf_stat_enabled = 0;
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+ for_each_input_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ for_each_output_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ break;
+ case 1:
+ irq_ptr->perf_stat_enabled = 1;
+ break;
+ }
+ return count;
+}
+
+static int qperf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qperf_show,
+ file_inode(filp)->i_private);
+}
+
+static const struct file_operations debugfs_perf_fops = {
+ .owner = THIS_MODULE,
+ .open = qperf_seq_open,
+ .read = seq_read,
+ .write = qperf_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void setup_debugfs_entry(struct qdio_q *q)
+{
+ char name[QDIO_DEBUGFS_NAME_LEN];
+
+ snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
+ q->is_input_q ? "input" : "output",
+ q->nr);
+ q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+ q->irq_ptr->debugfs_dev, q, &debugfs_fops);
+ if (IS_ERR(q->debugfs_q))
+ q->debugfs_q = NULL;
+}
+
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+ struct qdio_q *q;
+ int i;
+
+ irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+ debugfs_root);
+ if (IS_ERR(irq_ptr->debugfs_dev))
+ irq_ptr->debugfs_dev = NULL;
+
+ irq_ptr->debugfs_perf = debugfs_create_file("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr,
+ &debugfs_perf_fops);
+ if (IS_ERR(irq_ptr->debugfs_perf))
+ irq_ptr->debugfs_perf = NULL;
+
+ for_each_input_queue(irq_ptr, q, i)
+ setup_debugfs_entry(q);
+ for_each_output_queue(irq_ptr, q, i)
+ setup_debugfs_entry(q);
+}
+
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i)
+ debugfs_remove(q->debugfs_q);
+ for_each_output_queue(irq_ptr, q, i)
+ debugfs_remove(q->debugfs_q);
+ debugfs_remove(irq_ptr->debugfs_perf);
+ debugfs_remove(irq_ptr->debugfs_dev);
+}
+
+int __init qdio_debug_init(void)
+{
+ debugfs_root = debugfs_create_dir("qdio", NULL);
+
+ qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
+ debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_setup, DBF_INFO);
+ DBF_EVENT("dbf created\n");
+
+ qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
+ debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_error, DBF_INFO);
+ DBF_ERROR("dbf created\n");
+ return 0;
+}
+
+void qdio_debug_exit(void)
+{
+ qdio_clear_dbf_list();
+ debugfs_remove(debugfs_root);
+ if (qdio_dbf_setup)
+ debug_unregister(qdio_dbf_setup);
+ if (qdio_dbf_error)
+ debug_unregister(qdio_dbf_error);
+}
diff --git a/kernel/drivers/s390/cio/qdio_debug.h b/kernel/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 000000000..f33ce8577
--- /dev/null
+++ b/kernel/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright IBM Corp. 2008
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#ifndef QDIO_DEBUG_H
+#define QDIO_DEBUG_H
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include "qdio.h"
+
+/* that gives us 15 characters in the text event views */
+#define QDIO_DBF_LEN 16
+
+extern debug_info_t *qdio_dbf_setup;
+extern debug_info_t *qdio_dbf_error;
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 6 /* informational */
+
+#undef DBF_EVENT
+#undef DBF_ERROR
+#undef DBF_DEV_EVENT
+
+#define DBF_EVENT(text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
+ } while (0)
+
+static inline void DBF_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
+ len -= qdio_dbf_setup->buf_size;
+ addr += qdio_dbf_setup->buf_size;
+ }
+}
+
+#define DBF_ERROR(text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
+ } while (0)
+
+static inline void DBF_ERROR_HEX(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
+ len -= qdio_dbf_error->buf_size;
+ addr += qdio_dbf_error->buf_size;
+ }
+}
+
+#define DBF_DEV_EVENT(level, device, text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ if (debug_level_enabled(device->debug_area, level)) { \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(device->debug_area, level, debug_buffer); \
+ } \
+ } while (0)
+
+static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
+ int len, int level)
+{
+ while (len > 0) {
+ debug_event(dev->debug_area, level, addr, len);
+ len -= dev->debug_area->buf_size;
+ addr += dev->debug_area->buf_size;
+ }
+}
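+
+/*
+ * Illustrative usage of the trace macros above, as seen in the qdio
+ * callers (a sketch, not additional API): text events are truncated
+ * by snprintf() to QDIO_DBF_LEN - 1 = 15 characters, and the *_HEX
+ * helpers split larger dumps into buf_size sized chunks:
+ *
+ *	DBF_EVENT("qfmt:%1d", init_data->q_format);
+ *	DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
+ *	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
+ *	DBF_HEX(&init_data->int_parm, sizeof(long));
+ */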
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
+ struct qdio_irq *irq_ptr);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
+int qdio_debug_init(void);
+void qdio_debug_exit(void);
+
+#endif
diff --git a/kernel/drivers/s390/cio/qdio_main.c b/kernel/drivers/s390/cio/qdio_main.c
new file mode 100644
index 000000000..848e3b64e
--- /dev/null
+++ b/kernel/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1880 @@
+/*
+ * Linux for s390 qdio support, buffer handling, qdio API and module support.
+ *
+ * Copyright IBM Corp. 2000, 2008
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ipl.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
+ "Jan Glauber <jang@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support");
+MODULE_LICENSE("GPL");
+
+static inline int do_siga_sync(unsigned long schid,
+ unsigned int out_mask, unsigned int in_mask,
+ unsigned int fc)
+{
+ register unsigned long __fc asm ("0") = fc;
+ register unsigned long __schid asm ("1") = schid;
+ register unsigned long out asm ("2") = out_mask;
+ register unsigned long in asm ("3") = in_mask;
+ int cc;
+
+ asm volatile(
+ " siga 0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc)
+ : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+ return cc;
+}
+
+static inline int do_siga_input(unsigned long schid, unsigned int mask,
+ unsigned int fc)
+{
+ register unsigned long __fc asm ("0") = fc;
+ register unsigned long __schid asm ("1") = schid;
+ register unsigned long __mask asm ("2") = mask;
+ int cc;
+
+ asm volatile(
+ " siga 0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc)
+ : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
+ return cc;
+}
+
+/**
+ * do_siga_output - perform SIGA-w/wt function
+ * @schid: subchannel id or in case of QEBSM the subchannel token
+ * @mask: which output queues to process
+ * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
+ * @fc: function code to perform
+ *
+ * Returns condition code.
+ * Note: For IQDIO unicast queues only the highest priority queue is processed.
+ */
+static inline int do_siga_output(unsigned long schid, unsigned long mask,
+ unsigned int *bb, unsigned int fc,
+ unsigned long aob)
+{
+ register unsigned long __fc asm("0") = fc;
+ register unsigned long __schid asm("1") = schid;
+ register unsigned long __mask asm("2") = mask;
+ register unsigned long __aob asm("3") = aob;
+ int cc;
+
+ asm volatile(
+ " siga 0\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "+d" (__fc), "+d" (__aob)
+ : "d" (__schid), "d" (__mask)
+ : "cc");
+ *bb = __fc >> 31;
+ return cc;
+}
+
+static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+{
+ /* all done or next buffer state different */
+ if (ccq == 0 || ccq == 32)
+ return 0;
+ /* no buffer processed */
+ if (ccq == 97)
+ return 1;
+ /* not all buffers processed */
+ if (ccq == 96)
+ return 2;
+ /* notify devices immediately */
+ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+ return -EIO;
+}
+
+/**
+ * qdio_do_eqbs - extract buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: state of the extracted buffers
+ * @start: buffer number to start at
+ * @count: count of buffers to examine
+ * @auto_ack: automatically acknowledge buffers
+ *
+ * Returns the number of successfully extracted equal buffer states.
+ * Stops processing if a state differs from the previous buffer's state.
+ */
+static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ int start, int count, int auto_ack)
+{
+ int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+ unsigned int ccq = 0;
+
+ qperf_inc(q, eqbs);
+
+ if (!q->is_input_q)
+ nr += q->irq_ptr->nr_input_qs;
+again:
+ ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
+ auto_ack);
+ rc = qdio_check_ccq(q, ccq);
+ if (!rc)
+ return count - tmp_count;
+
+ if (rc == 1) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
+ goto again;
+ }
+
+ if (rc == 2) {
+ qperf_inc(q, eqbs_partial);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ tmp_count);
+ /*
+ * Retry once, if that fails bail out and process the
+ * extracted buffers before trying again.
+ */
+ if (!retried++)
+ goto again;
+ else
+ return count - tmp_count;
+ }
+
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
+}
+
+/**
+ * qdio_do_sqbs - set buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: new state of the buffers
+ * @start: first buffer number to change
+ * @count: how many buffers to change
+ *
+ * Returns the number of successfully changed buffers.
+ * Does retrying until the specified count of buffer states is set or an
+ * error occurs.
+ */
+static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
+ int count)
+{
+ unsigned int ccq = 0;
+ int tmp_count = count, tmp_start = start;
+ int nr = q->nr;
+ int rc;
+
+ if (!count)
+ return 0;
+ qperf_inc(q, sqbs);
+
+ if (!q->is_input_q)
+ nr += q->irq_ptr->nr_input_qs;
+again:
+ ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+ rc = qdio_check_ccq(q, ccq);
+ if (!rc) {
+ WARN_ON_ONCE(tmp_count);
+ return count - tmp_count;
+ }
+
+ if (rc == 1 || rc == 2) {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
+ qperf_inc(q, sqbs_partial);
+ goto again;
+ }
+
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
+}
+
+/* returns number of examined buffers and their common state in *state */
+static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, unsigned int count,
+ int auto_ack, int merge_pending)
+{
+ unsigned char __state = 0;
+ int i;
+
+ if (is_qebsm(q))
+ return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
+
+ for (i = 0; i < count; i++) {
+ if (!__state) {
+ __state = q->slsb.val[bufnr];
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+ } else if (merge_pending) {
+ if ((q->slsb.val[bufnr] & __state) != __state)
+ break;
+ } else if (q->slsb.val[bufnr] != __state)
+ break;
+ bufnr = next_buf(bufnr);
+ }
+ *state = __state;
+ return i;
+}
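+
+/*
+ * Note on merge_pending above, under the assumption (per asm/qdio.h)
+ * that SLSB_STATE_PENDING includes the SLSB_STATE_EMPTY bit: with
+ * merge_pending set, an outbound run of EMPTY, PENDING, EMPTY starting
+ * at bufnr is reported as three buffers in SLSB_P_OUTPUT_EMPTY instead
+ * of stopping at the state change; qdio_handle_aobs() later re-reads
+ * the individual states to pick out the PENDING buffers.
+ */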
+
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, int auto_ack)
+{
+ return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
+}
+
+/* wrap-around safe setting of slsb states, returns number of changed buffers */
+static inline int set_buf_states(struct qdio_q *q, int bufnr,
+ unsigned char state, int count)
+{
+ int i;
+
+ if (is_qebsm(q))
+ return qdio_do_sqbs(q, state, bufnr, count);
+
+ for (i = 0; i < count; i++) {
+ xchg(&q->slsb.val[bufnr], state);
+ bufnr = next_buf(bufnr);
+ }
+ return count;
+}
+
+static inline int set_buf_state(struct qdio_q *q, int bufnr,
+ unsigned char state)
+{
+ return set_buf_states(q, bufnr, state, 1);
+}
+
+/* set slsb states to initial state */
+static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i)
+ set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
+ QDIO_MAX_BUFFERS_PER_Q);
+ for_each_output_queue(irq_ptr, q, i)
+ set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
+ QDIO_MAX_BUFFERS_PER_Q);
+}
+
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+ unsigned int input)
+{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_SYNC;
+ int cc;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
+ qperf_inc(q, siga_sync);
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+
+ cc = do_siga_sync(schid, output, input, fc);
+ if (unlikely(cc))
+ DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
+ return (cc) ? -EIO : 0;
+}
+
+static inline int qdio_siga_sync_q(struct qdio_q *q)
+{
+ if (q->is_input_q)
+ return qdio_siga_sync(q, 0, q->mask);
+ else
+ return qdio_siga_sync(q, q->mask, 0);
+}
+
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+ unsigned long aob)
+{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_WRITE;
+ u64 start_time = 0;
+ int retries = 0, cc;
+ unsigned long laob = 0;
+
+ if (q->u.out.use_cq && aob != 0) {
+ fc = QDIO_SIGA_WRITEQ;
+ laob = aob;
+ }
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+again:
+ WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+ (aob && fc != QDIO_SIGA_WRITEQ));
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+
+ /* hipersocket busy condition */
+ if (unlikely(*busy_bit)) {
+ retries++;
+
+ if (!start_time) {
+ start_time = get_tod_clock_fast();
+ goto again;
+ }
+ if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
+ goto again;
+ }
+ if (retries) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+ "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+ }
+ return cc;
+}
+
+static inline int qdio_siga_input(struct qdio_q *q)
+{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_READ;
+ int cc;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
+ qperf_inc(q, siga_read);
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+
+ cc = do_siga_input(schid, q->mask, fc);
+ if (unlikely(cc))
+ DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
+ return (cc) ? -EIO : 0;
+}
+
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
+{
+ /* PCI capable outbound queues will also be scanned so sync them too */
+ if (pci_out_supported(q))
+ qdio_siga_sync_all(q);
+ else
+ qdio_siga_sync_q(q);
+}
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+ return get_buf_states(q, bufnr, state, 1, 0, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
+{
+ if (!q->u.in.polling)
+ return;
+
+ q->u.in.polling = 0;
+ qperf_inc(q, stop_polling);
+
+ /* show the card that we are not polling anymore */
+ if (is_qebsm(q)) {
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = 0;
+ } else
+ set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+}
+
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
+{
+ int pos;
+
+ q->q_stats.nr_sbal_total += count;
+ if (count == QDIO_MAX_BUFFERS_MASK) {
+ q->q_stats.nr_sbals[7]++;
+ return;
+ }
+ pos = ilog2(count);
+ q->q_stats.nr_sbals[pos]++;
+}
+
+static void process_buffer_error(struct qdio_q *q, int count)
+{
+ unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
+ SLSB_P_OUTPUT_NOT_INIT;
+
+ q->qdio_error = QDIO_ERROR_SLSB_STATE;
+
+ /* special handling for no target buffer empty */
+ if ((!q->is_input_q &&
+ (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
+ qperf_inc(q, target_full);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
+ q->first_to_check);
+ goto set;
+ }
+
+ DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
+ DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
+ DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+ DBF_ERROR("F14:%2x F15:%2x",
+ q->sbal[q->first_to_check]->element[14].sflags,
+ q->sbal[q->first_to_check]->element[15].sflags);
+
+set:
+ /*
+ * Interrupts may be avoided as long as the error is present
+ * so change the buffer state immediately to avoid starvation.
+ */
+ set_buf_states(q, q->first_to_check, state, count);
+}
+
+static inline void inbound_primed(struct qdio_q *q, int count)
+{
+ int new;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
+
+ /* for QEBSM the ACK was already set by EQBS */
+ if (is_qebsm(q)) {
+ if (!q->u.in.polling) {
+ q->u.in.polling = 1;
+ q->u.in.ack_count = count;
+ q->u.in.ack_start = q->first_to_check;
+ return;
+ }
+
+ /* delete the previous ACK's */
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = count;
+ q->u.in.ack_start = q->first_to_check;
+ return;
+ }
+
+ /*
+ * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
+ * or by the next inbound run.
+ */
+ new = add_buf(q->first_to_check, count - 1);
+ if (q->u.in.polling) {
+ /* reset the previous ACK but first set the new one */
+ set_buf_state(q, new, SLSB_P_INPUT_ACK);
+ set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+ } else {
+ q->u.in.polling = 1;
+ set_buf_state(q, new, SLSB_P_INPUT_ACK);
+ }
+
+ q->u.in.ack_start = new;
+ count--;
+ if (!count)
+ return;
+ /* need to change ALL buffers to get more interrupts */
+ set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+}
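+
+/*
+ * Worked example for the non-QEBSM path above (illustrative): with
+ * first_to_check = 10 and count = 3, buffer 12 becomes the new ACK,
+ * any previous ACK is reset, and buffers 10 and 11 are set back to
+ * SLSB_P_INPUT_NOT_INIT so that newly primed buffers raise adapter
+ * interrupts again.
+ */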
+
+static int get_inbound_buffer_frontier(struct qdio_q *q)
+{
+ int count, stop;
+ unsigned char state = 0;
+
+ q->timestamp = get_tod_clock_fast();
+
+ /*
+ * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
+ * would return 0.
+ */
+ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ stop = add_buf(q->first_to_check, count);
+
+ if (q->first_to_check == stop)
+ goto out;
+
+ /*
+ * No siga-sync needed here: after a PCI interrupt or a thin
+ * interrupt the queues have already been synced.
+ */
+ count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+ if (!count)
+ goto out;
+
+ switch (state) {
+ case SLSB_P_INPUT_PRIMED:
+ inbound_primed(q, count);
+ q->first_to_check = add_buf(q->first_to_check, count);
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+ break;
+ case SLSB_P_INPUT_ERROR:
+ process_buffer_error(q, count);
+ q->first_to_check = add_buf(q->first_to_check, count);
+ atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
+ break;
+ case SLSB_CU_INPUT_EMPTY:
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_INPUT_ACK:
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+out:
+ return q->first_to_check;
+}
+
+static int qdio_inbound_q_moved(struct qdio_q *q)
+{
+ int bufnr;
+
+ bufnr = get_inbound_buffer_frontier(q);
+
+ if (bufnr != q->last_move) {
+ q->last_move = bufnr;
+ if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+ q->u.in.timestamp = get_tod_clock();
+ return 1;
+ } else
+ return 0;
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q)
+{
+ unsigned char state = 0;
+
+ if (!atomic_read(&q->nr_buf_used))
+ return 1;
+
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+ get_buf_state(q, q->first_to_check, &state, 0);
+
+ if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
+ /* more work coming */
+ return 0;
+
+ if (is_thinint_irq(q->irq_ptr))
+ return 1;
+
+ /* don't poll under z/VM */
+ if (MACHINE_IS_VM)
+ return 1;
+
+ /*
+ * At this point we know that inbound first_to_check
+ * has (probably) not moved (see qdio_inbound_processing).
+ */
+ if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
+ q->first_to_check);
+ return 1;
+ } else
+ return 0;
+}
+
+static inline int contains_aobs(struct qdio_q *q)
+{
+ return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+ unsigned char state = 0;
+ int j, b = start;
+
+ if (!contains_aobs(q))
+ return;
+
+ for (j = 0; j < count; ++j) {
+ get_buf_state(q, b, &state, 0);
+ if (state == SLSB_P_OUTPUT_PENDING) {
+ struct qaob *aob = q->u.out.aobs[b];
+ if (aob == NULL)
+ continue;
+
+ q->u.out.sbal_state[b].flags |=
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
+ q->u.out.aobs[b] = NULL;
+ } else if (state == SLSB_P_OUTPUT_EMPTY) {
+ q->u.out.sbal_state[b].aob = NULL;
+ }
+ b = next_buf(b);
+ }
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+ int bufnr)
+{
+ unsigned long phys_aob = 0;
+
+ if (!q->use_cq)
+ goto out;
+
+ if (!q->aobs[bufnr]) {
+ struct qaob *aob = qdio_allocate_aob();
+ q->aobs[bufnr] = aob;
+ }
+ if (q->aobs[bufnr]) {
+ q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+ q->sbal_state[bufnr].aob = q->aobs[bufnr];
+ q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+ phys_aob = virt_to_phys(q->aobs[bufnr]);
+ WARN_ON_ONCE(phys_aob & 0xFF);
+ }
+
+out:
+ return phys_aob;
+}
+
+static void qdio_kick_handler(struct qdio_q *q)
+{
+ int start = q->first_to_kick;
+ int end = q->first_to_check;
+ int count;
+
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return;
+
+ count = sub_buf(end, start);
+
+ if (q->is_input_q) {
+ qperf_inc(q, inbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
+ } else {
+ qperf_inc(q, outbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+ start, count);
+ }
+
+ qdio_handle_aobs(q, start, count);
+
+ q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+ q->irq_ptr->int_parm);
+
+ /* for the next time */
+ q->first_to_kick = end;
+ q->qdio_error = 0;
+}
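+
+/*
+ * Example (illustrative): with first_to_kick = 5 and first_to_check = 9
+ * the handler above is invoked for count = 4 buffers starting at
+ * buffer 5, after which first_to_kick catches up to 9.
+ */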
+
+static void __qdio_inbound_processing(struct qdio_q *q)
+{
+ qperf_inc(q, tasklet_inbound);
+
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_handler(q);
+
+ if (!qdio_inbound_q_done(q)) {
+ /* means poll time is not yet over */
+ qperf_inc(q, tasklet_inbound_resched);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+void qdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __qdio_inbound_processing(q);
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q)
+{
+ int count, stop;
+ unsigned char state = 0;
+
+ q->timestamp = get_tod_clock_fast();
+
+ if (need_siga_sync(q))
+ if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+ !pci_out_supported(q)) ||
+ (queue_type(q) == QDIO_IQDIO_QFMT &&
+ multicast_outbound(q)))
+ qdio_siga_sync_q(q);
+
+ /*
+ * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
+ * would return 0.
+ */
+ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ stop = add_buf(q->first_to_check, count);
+ if (q->first_to_check == stop)
+ goto out;
+
+ count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
+ if (!count)
+ goto out;
+
+ switch (state) {
+ case SLSB_P_OUTPUT_EMPTY:
+ /* the adapter got it */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+ "out empty:%1d %02x", q->nr, count);
+
+ atomic_sub(count, &q->nr_buf_used);
+ q->first_to_check = add_buf(q->first_to_check, count);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+
+ break;
+ case SLSB_P_OUTPUT_ERROR:
+ process_buffer_error(q, count);
+ q->first_to_check = add_buf(q->first_to_check, count);
+ atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
+ break;
+ case SLSB_CU_OUTPUT_PRIMED:
+ /* the adapter has not fetched the output yet */
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+ q->nr);
+ break;
+ case SLSB_P_OUTPUT_NOT_INIT:
+ case SLSB_P_OUTPUT_HALTED:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+out:
+ return q->first_to_check;
+}
+
+/* all buffers processed? */
+static inline int qdio_outbound_q_done(struct qdio_q *q)
+{
+ return atomic_read(&q->nr_buf_used) == 0;
+}
+
+static inline int qdio_outbound_q_moved(struct qdio_q *q)
+{
+ int bufnr;
+
+ bufnr = get_outbound_buffer_frontier(q);
+
+ if (bufnr != q->last_move) {
+ q->last_move = bufnr;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+ return 1;
+ } else
+ return 0;
+}
+
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+{
+ int retries = 0, cc;
+ unsigned int busy_bit;
+
+ if (!need_siga_out(q))
+ return 0;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+retry:
+ qperf_inc(q, siga_write);
+
+ cc = qdio_siga_output(q, &busy_bit, aob);
+ switch (cc) {
+ case 0:
+ break;
+ case 2:
+ if (busy_bit) {
+ while (++retries < QDIO_BUSY_BIT_RETRIES) {
+ mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+ goto retry;
+ }
+ DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
+ cc = -EBUSY;
+ } else {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
+ cc = -ENOBUFS;
+ }
+ break;
+ case 1:
+ case 3:
+ DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+ cc = -EIO;
+ break;
+ }
+ if (retries) {
+ DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+ DBF_ERROR("count:%u", retries);
+ }
+ return cc;
+}
+
+static void __qdio_outbound_processing(struct qdio_q *q)
+{
+ qperf_inc(q, tasklet_outbound);
+ WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
+
+ if (qdio_outbound_q_moved(q))
+ qdio_kick_handler(q);
+
+ if (queue_type(q) == QDIO_ZFCP_QFMT)
+ if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
+ goto sched;
+
+ if (q->u.out.pci_out_enabled)
+ return;
+
+ /*
+ * Now we know that queue type is either qeth without pci enabled
+ * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
+ * is noticed and outbound_handler is called after some time.
+ */
+ if (qdio_outbound_q_done(q))
+ del_timer(&q->u.out.timer);
+ else if (!timer_pending(&q->u.out.timer))
+ mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
+ return;
+
+sched:
+ if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ return;
+ tasklet_schedule(&q->tasklet);
+}
+
+/* outbound tasklet */
+void qdio_outbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __qdio_outbound_processing(q);
+}
+
+void qdio_outbound_timer(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+
+ if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ return;
+ tasklet_schedule(&q->tasklet);
+}
+
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+{
+ struct qdio_q *out;
+ int i;
+
+ if (!pci_out_supported(q))
+ return;
+
+ for_each_output_queue(q->irq_ptr, out, i)
+ if (!qdio_outbound_q_done(out))
+ tasklet_schedule(&out->tasklet);
+}
+
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+ qperf_inc(q, tasklet_inbound);
+ if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+ qdio_sync_queues(q);
+
+ /*
+ * The interrupt could be caused by a PCI request. Check the
+ * PCI capable outbound queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_handler(q);
+
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __tiqdio_inbound_processing(q);
+}
+
+static inline void qdio_set_state(struct qdio_irq *irq_ptr,
+ enum qdio_irq_states state)
+{
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
+
+ irq_ptr->state = state;
+ mb();
+}
+
+static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
+{
+ if (irb->esw.esw0.erw.cons) {
+ DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
+ DBF_ERROR_HEX(irb, 64);
+ DBF_ERROR_HEX(irb->ecw, 64);
+ }
+}
+
+/* PCI interrupt handler */
+static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
+{
+ int i;
+ struct qdio_q *q;
+
+ if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ return;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
+ tasklet_schedule(&q->tasklet);
+ }
+ }
+
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ return;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (qdio_outbound_q_done(q))
+ continue;
+ if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
+ qdio_siga_sync_q(q);
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+static void qdio_handle_activate_check(struct ccw_device *cdev,
+ unsigned long intparm, int cstat, int dstat)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+ int count;
+
+ DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
+ DBF_ERROR("intp :%lx", intparm);
+ DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+
+ if (irq_ptr->nr_input_qs) {
+ q = irq_ptr->input_qs[0];
+ } else if (irq_ptr->nr_output_qs) {
+ q = irq_ptr->output_qs[0];
+ } else {
+ dump_stack();
+ goto no_handler;
+ }
+
+ count = sub_buf(q->first_to_check, q->first_to_kick);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
+ q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+no_handler:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+ /*
+ * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
+ * Therefore we call the LGR detection function here.
+ */
+ lgr_info_log();
+}
+
+static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+ int dstat)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
+
+ if (cstat)
+ goto error;
+ if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
+ goto error;
+ if (!(dstat & DEV_STAT_DEV_END))
+ goto error;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+ return;
+
+error:
+ DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
+ DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+}
+
+/* qdio interrupt handler */
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ int cstat, dstat;
+
+ if (!intparm || !irq_ptr) {
+ DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
+ return;
+ }
+
+ if (irq_ptr->perf_stat_enabled)
+ irq_ptr->perf_stat.qdio_int++;
+
+ if (IS_ERR(irb)) {
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ qdio_irq_check_sense(irq_ptr, irb);
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_INACTIVE:
+ qdio_establish_handle_irq(cdev, cstat, dstat);
+ break;
+ case QDIO_IRQ_STATE_CLEANUP:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ break;
+ case QDIO_IRQ_STATE_ESTABLISHED:
+ case QDIO_IRQ_STATE_ACTIVE:
+ if (cstat & SCHN_STAT_PCI) {
+ qdio_int_handler_pci(irq_ptr);
+ return;
+ }
+ if (cstat || dstat)
+ qdio_handle_activate_check(cdev, intparm, cstat,
+ dstat);
+ break;
+ case QDIO_IRQ_STATE_STOPPED:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ wake_up(&cdev->private->wait_q);
+}
+
+/**
+ * qdio_get_ssqd_desc - get qdio subchannel description
+ * @cdev: ccw device to get description for
+ * @data: where to store the ssqd
+ *
+ * Returns 0 or an error code. The results of the chsc are stored in the
+ * specified structure.
+ */
+int qdio_get_ssqd_desc(struct ccw_device *cdev,
+ struct qdio_ssqd_desc *data)
+{
+ if (!cdev || !cdev->private)
+ return -EINVAL;
+
+ DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
+ return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
+}
+EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
+
+static void qdio_shutdown_queues(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i)
+ tasklet_kill(&q->tasklet);
+
+ for_each_output_queue(irq_ptr, q, i) {
+ del_timer(&q->u.out.timer);
+ tasklet_kill(&q->tasklet);
+ }
+}
+
+/**
+ * qdio_shutdown - shut down a qdio subchannel
+ * @cdev: associated ccw device
+ * @how: use halt or clear to shutdown
+ */
+int qdio_shutdown(struct ccw_device *cdev, int how)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ int rc;
+ unsigned long flags;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ WARN_ON_ONCE(irqs_disabled());
+ DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ /*
+ * Subchannel was already shot down. We cannot prevent being called
+ * twice since cio may trigger a shutdown asynchronously.
+ */
+ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return 0;
+ }
+
+ /*
+ * Indicate that the device is going down. Scheduling the queue
+ * tasklets is forbidden from here on.
+ */
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+
+ tiqdio_remove_input_queues(irq_ptr);
+ qdio_shutdown_queues(cdev);
+ qdio_shutdown_debug_entries(irq_ptr);
+
+ /* cleanup subchannel */
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+
+ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+ else
+ /* default behaviour is halt */
+ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+ if (rc) {
+ DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4d", rc);
+ goto no_cleanup;
+ }
+
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
+ irq_ptr->state == QDIO_IRQ_STATE_ERR,
+ 10 * HZ);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+
+no_cleanup:
+ qdio_shutdown_thinint(irq_ptr);
+
+ /* restore interrupt handler */
+ if ((void *)cdev->handler == (void *)qdio_int_handler)
+ cdev->handler = irq_ptr->orig_handler;
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_shutdown);
+
+/**
+ * qdio_free - free data structures for a qdio subchannel
+ * @cdev: associated ccw device
+ */
+int qdio_free(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
+ mutex_lock(&irq_ptr->setup_mutex);
+
+ irq_ptr->debug_area = NULL;
+ cdev->private->qdio_data = NULL;
+ mutex_unlock(&irq_ptr->setup_mutex);
+
+ qdio_release_memory(irq_ptr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_free);
+
+/**
+ * qdio_allocate - allocate qdio queues and associated data
+ * @init_data: initialization data
+ */
+int qdio_allocate(struct qdio_initialize *init_data)
+{
+ struct qdio_irq *irq_ptr;
+
+ DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
+
+ if ((init_data->no_input_qs && !init_data->input_handler) ||
+ (init_data->no_output_qs && !init_data->output_handler))
+ return -EINVAL;
+
+ if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
+ (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
+ return -EINVAL;
+
+ if ((!init_data->input_sbal_addr_array) ||
+ (!init_data->output_sbal_addr_array))
+ return -EINVAL;
+
+ /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
+ irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!irq_ptr)
+ goto out_err;
+
+ mutex_init(&irq_ptr->setup_mutex);
+ if (qdio_allocate_dbf(init_data, irq_ptr))
+ goto out_rel;
+
+ /*
+ * Allocate a page for the chsc calls in qdio_establish.
+ * Must be pre-allocated since a zfcp recovery will call
+ * qdio_establish. In case of low memory and swap on a zfcp disk
+ * we may not be able to allocate memory otherwise.
+ */
+ irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
+ if (!irq_ptr->chsc_page)
+ goto out_rel;
+
+ /* qdr is used in ccw1.cda which is u32 */
+ irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!irq_ptr->qdr)
+ goto out_rel;
+
+ if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
+ init_data->no_output_qs))
+ goto out_rel;
+
+ init_data->cdev->private->qdio_data = irq_ptr;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ return 0;
+out_rel:
+ qdio_release_memory(irq_ptr);
+out_err:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(qdio_allocate);
+
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q = irq_ptr->input_qs[0];
+ int i, use_cq = 0;
+
+ if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+ use_cq = 1;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (use_cq) {
+ if (qdio_enable_async_operation(&q->u.out) < 0) {
+ use_cq = 0;
+ continue;
+ }
+ } else
+ qdio_disable_async_operation(&q->u.out);
+ }
+ DBF_EVENT("use_cq:%d", use_cq);
+}
+
+/**
+ * qdio_establish - establish queues on a qdio subchannel
+ * @init_data: initialization data
+ */
+int qdio_establish(struct qdio_initialize *init_data)
+{
+ struct qdio_irq *irq_ptr;
+ struct ccw_device *cdev = init_data->cdev;
+ unsigned long saveflags;
+ int rc;
+
+ DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EINVAL;
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ qdio_setup_irq(init_data);
+
+ rc = qdio_establish_thinint(irq_ptr);
+ if (rc) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return rc;
+ }
+
+ /* establish q */
+ irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
+ irq_ptr->ccw.flags = CCW_FLAG_SLI;
+ irq_ptr->ccw.count = irq_ptr->equeue.count;
+ irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ ccw_device_set_options_mask(cdev, 0);
+
+ rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+ if (rc) {
+ DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
+
+ if (rc) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return rc;
+ }
+
+ wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+
+ if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return -EIO;
+ }
+
+ qdio_setup_ssqd_info(irq_ptr);
+
+ qdio_detect_hsicq(irq_ptr);
+
+ /* qebsm is now setup if available, initialize buffer states */
+ qdio_init_buf_states(irq_ptr);
+
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_print_subchannel_info(irq_ptr, cdev);
+ qdio_setup_debug_entries(irq_ptr, cdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_establish);
+
+/**
+ * qdio_activate - activate queues on a qdio subchannel
+ * @cdev: associated cdev
+ */
+int qdio_activate(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr;
+ int rc;
+ unsigned long saveflags;
+
+ DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EINVAL;
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
+ irq_ptr->ccw.flags = CCW_FLAG_SLI;
+ irq_ptr->ccw.count = irq_ptr->aqueue.count;
+ irq_ptr->ccw.cda = 0;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
+
+ rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
+ 0, DOIO_DENY_PREFETCH);
+ if (rc) {
+ DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
+
+ if (rc)
+ goto out;
+
+ if (is_thinint_irq(irq_ptr))
+ tiqdio_add_input_queues(irq_ptr);
+
+ /* wait for subchannel to become active */
+ msleep(5);
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_STOPPED:
+ case QDIO_IRQ_STATE_ERR:
+ rc = -EIO;
+ break;
+ default:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
+ rc = 0;
+ }
+out:
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_activate);
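+
+/*
+ * Typical driver usage of the lifecycle functions above (a sketch with
+ * error handling omitted; init_data is assumed to be filled in by the
+ * caller):
+ *
+ *	rc = qdio_allocate(&init_data);
+ *	rc = qdio_establish(&init_data);
+ *	rc = qdio_activate(cdev);
+ *	...
+ *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ *	qdio_free(cdev);
+ */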
+
+static inline int buf_in_between(int bufnr, int start, int count)
+{
+ int end = add_buf(start, count);
+
+ if (end > start) {
+ if (bufnr >= start && bufnr < end)
+ return 1;
+ else
+ return 0;
+ }
+
+ /* wrap-around case */
+ if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
+ (bufnr < end))
+ return 1;
+ else
+ return 0;
+}
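+
+/*
+ * Wrap-around example (illustrative): for start = 120 and count = 16,
+ * end = add_buf(120, 16) = 8, so buffers 120..127 and 0..7 are in
+ * between; bufnr = 2 matches, bufnr = 100 does not.
+ */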
+
+/**
+ * handle_inbound - reset processed input buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are emptied
+ */
+static int handle_inbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
+{
+ int diff;
+
+ qperf_inc(q, inbound_call);
+
+ if (!q->u.in.polling)
+ goto set;
+
+ /* protect against stop polling setting an ACK for an emptied slsb */
+ if (count == QDIO_MAX_BUFFERS_PER_Q) {
+ /* overwriting everything, just delete polling status */
+ q->u.in.polling = 0;
+ q->u.in.ack_count = 0;
+ goto set;
+ } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
+ if (is_qebsm(q)) {
+ /* partial overwrite, just update ack_start */
+ diff = add_buf(bufnr, count);
+ diff = sub_buf(diff, q->u.in.ack_start);
+ q->u.in.ack_count -= diff;
+ if (q->u.in.ack_count <= 0) {
+ q->u.in.polling = 0;
+ q->u.in.ack_count = 0;
+ goto set;
+ }
+ q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
+ } else
+ /* the only ACK will be deleted, so stop polling */
+ q->u.in.polling = 0;
+ }
+
+set:
+ count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
+ atomic_add(count, &q->nr_buf_used);
+
+ if (need_siga_in(q))
+ return qdio_siga_input(q);
+
+ return 0;
+}
+
+/**
+ * handle_outbound - process filled outbound buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are filled
+ */
+static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
+{
+ unsigned char state = 0;
+ int used, rc = 0;
+
+ qperf_inc(q, outbound_call);
+
+ count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
+ used = atomic_add_return(count, &q->nr_buf_used);
+
+ if (used == QDIO_MAX_BUFFERS_PER_Q)
+ qperf_inc(q, outbound_queue_full);
+
+ if (callflags & QDIO_FLAG_PCI_OUT) {
+ q->u.out.pci_out_enabled = 1;
+ qperf_inc(q, pci_request_int);
+ } else
+ q->u.out.pci_out_enabled = 0;
+
+ if (queue_type(q) == QDIO_IQDIO_QFMT) {
+ unsigned long phys_aob = 0;
+
+ /* One SIGA-W per buffer required for unicast HSI */
+ WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+ rc = qdio_kick_outbound_q(q, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
+ } else {
+ /* try to fast requeue buffers */
+ get_buf_state(q, prev_buf(bufnr), &state, 0);
+ if (state != SLSB_CU_OUTPUT_PRIMED)
+ rc = qdio_kick_outbound_q(q, 0);
+ else
+ qperf_inc(q, fast_requeue);
+ }
+
+ /* in case of SIGA errors we must process the error immediately */
+ if (used >= q->u.out.scan_threshold || rc)
+ tasklet_schedule(&q->tasklet);
+ else
+ /* free the SBALs in case of no further traffic */
+ if (!timer_pending(&q->u.out.timer))
+ mod_timer(&q->u.out.timer, jiffies + HZ);
+ return rc;
+}
+
+/**
+ * do_QDIO - process input or output buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @callflags: input or output and special flags from the program
+ * @q_nr: queue number
+ * @bufnr: buffer number
+ * @count: how many buffers to process
+ */
+int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+ int q_nr, unsigned int bufnr, unsigned int count)
+{
+ struct qdio_irq *irq_ptr;
+
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
+ return -EINVAL;
+
+ irq_ptr = cdev->private->qdio_data;
+ if (!irq_ptr)
+ return -ENODEV;
+
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+ "do%02x b:%02x c:%02x", callflags, bufnr, count);
+
+ if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
+ return -EIO;
+ if (!count)
+ return 0;
+ if (callflags & QDIO_FLAG_SYNC_INPUT)
+ return handle_inbound(irq_ptr->input_qs[q_nr],
+ callflags, bufnr, count);
+ else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
+ return handle_outbound(irq_ptr->output_qs[q_nr],
+ callflags, bufnr, count);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(do_QDIO);
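+
+/*
+ * Illustrative do_QDIO() calls (a sketch, not taken from a particular
+ * driver): return count emptied buffers to input queue 0, or hand
+ * count filled buffers to output queue 0 and request a PCI:
+ *
+ *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
+ *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
+ *		     0, bufnr, count);
+ */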
+
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - success
+ * 1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ clear_nonshared_ind(irq_ptr);
+ qdio_stop_polling(q);
+ clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (test_nonshared_ind(irq_ptr))
+ goto rescan;
+ if (!qdio_inbound_q_done(q))
+ goto rescan;
+ return 0;
+
+rescan:
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ * < 0 - error
+ * = 0 - no new buffers found
+ * > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+ int *error)
+{
+ struct qdio_q *q;
+ int start, end;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ /*
+ * Cannot rely on automatic sync after interrupt since queues may
+ * also be examined without interrupt.
+ */
+ if (need_siga_sync(q))
+ qdio_sync_queues(q);
+
+ /* check the PCI capable outbound queues. */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return 0;
+
+ /* Note: upper-layer MUST stop processing immediately here ... */
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return -EIO;
+
+ start = q->first_to_kick;
+ end = q->first_to_check;
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_kick = end;
+ q->qdio_error = 0;
+ return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ * 0 - interrupts were already disabled
+ * 1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
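+
+/*
+ * qdio_start_irq(), qdio_get_next_buffers() and qdio_stop_irq() form
+ * the interrupt avoidance (polling) API. A minimal polling loop might
+ * look like this (a sketch; process_buffers() is a hypothetical driver
+ * helper):
+ *
+ *	do {
+ *		while (qdio_get_next_buffers(cdev, 0, &bufnr, &error) > 0)
+ *			process_buffers(bufnr, error);
+ *	} while (qdio_start_irq(cdev, 0));
+ */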
+
+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid: Subchannel ID.
+ * @cnc: Boolean Change-Notification Control
+ * @response: Response code will be stored at this address
+ * @cb: Callback function executed for each element of the address
+ *	list; it is passed the type of the address entry, the entry
+ *	containing the address of that type, and the caller's private
+ *	pointer
+ * @priv: Pointer passed through to the callback function.
+ *
+ * Performs "Store-network-bridging-information list" operation and calls
+ * the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv)
+{
+ struct chsc_pnso_area *rr;
+ int rc;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+ if (rc != 0 && rc != -EBUSY)
+ goto out;
+ if (rr->response.code != 1) {
+ rc = -EIO;
+ continue;
+ } else
+ rc = 0;
+
+ if (cb == NULL)
+ continue;
+
+ size = rr->naihdr.naids;
+ elems = (rr->response.length -
+ sizeof(struct chsc_header) -
+ sizeof(struct chsc_brinfo_naihdr)) /
+ size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+ /*
+ * Inform the caller that they need to scrap the data
+ * that was already reported via the callback.
+ */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ switch (size) {
+ case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+ (*cb)(priv, l3_ipv6_addr,
+ &rr->entries.l3_ipv6[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+ (*cb)(priv, l3_ipv4_addr,
+ &rr->entries.l3_ipv4[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l2):
+ (*cb)(priv, l2_addr_lnid,
+ &rr->entries.l2[i]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ rc = -EIO;
+ goto out;
+ }
+ } while (rr->response.code == 0x0107 || /* channel busy */
+ (rr->response.code == 1 && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+ (*response) = rr->response.code;
+
+out:
+ free_page((unsigned long)rr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
+
+static int __init init_QDIO(void)
+{
+ int rc;
+
+ rc = qdio_debug_init();
+ if (rc)
+ return rc;
+ rc = qdio_setup_init();
+ if (rc)
+ goto out_debug;
+ rc = tiqdio_allocate_memory();
+ if (rc)
+ goto out_cache;
+ rc = tiqdio_register_thinints();
+ if (rc)
+ goto out_ti;
+ return 0;
+
+out_ti:
+ tiqdio_free_memory();
+out_cache:
+ qdio_setup_exit();
+out_debug:
+ qdio_debug_exit();
+ return rc;
+}
+
+static void __exit exit_QDIO(void)
+{
+ tiqdio_unregister_thinints();
+ tiqdio_free_memory();
+ qdio_setup_exit();
+ qdio_debug_exit();
+}
+
+module_init(init_QDIO);
+module_exit(exit_QDIO);
diff --git a/kernel/drivers/s390/cio/qdio_setup.c b/kernel/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 000000000..48b3866a9
--- /dev/null
+++ b/kernel/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,597 @@
+/*
+ * qdio queue initialization
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
+
+static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob(void)
+{
+ return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+
+void qdio_release_aob(struct qaob *aob)
+{
+ kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);
+
+/**
+ * qdio_free_buffers() - free qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to free
+ */
+void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
+ free_page((unsigned long) buf[pos]);
+}
+EXPORT_SYMBOL_GPL(qdio_free_buffers);
+
+/**
+ * qdio_alloc_buffers() - allocate qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to allocate
+ */
+int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
+ buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!buf[pos]) {
+ qdio_free_buffers(buf, count);
+ return -ENOMEM;
+ }
+ }
+ for (pos = 0; pos < count; pos++)
+ if (pos % QBUFF_PER_PAGE)
+ buf[pos] = buf[pos - 1] + 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
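+
+/*
+ * Layout note (illustrative numbers): QBUFF_PER_PAGE buffers share one
+ * page, i.e. with 4 KiB pages and 256-byte SBALs 16 buffers per page;
+ * qdio_alloc_buffers() therefore allocates a fresh page for every
+ * QBUFF_PER_PAGE'th slot and points the slots in between into the
+ * same page.
+ */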
+
+/**
+ * qdio_reset_buffers() - reset qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers that will be zeroed
+ */
+void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos++)
+ memset(buf[pos], 0, sizeof(struct qdio_buffer));
+}
+EXPORT_SYMBOL_GPL(qdio_reset_buffers);
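+
+/*
+ * Note on the helpers above: buffers are packed QBUFF_PER_PAGE to a page.
+ * qdio_alloc_buffers() takes a fresh page only for every QBUFF_PER_PAGE-th
+ * slot and derives the pointers in between from it, which is why
+ * qdio_free_buffers() walks the array in QBUFF_PER_PAGE strides. A sketch
+ * of the typical driver usage (array and function names are hypothetical):
+ */
+#if 0	/* example only, not built */
+static struct qdio_buffer *example_bufs[QDIO_MAX_BUFFERS_PER_Q];
+
+static int example_setup_buffers(void)
+{
+	int rc;
+
+	rc = qdio_alloc_buffers(example_bufs, QDIO_MAX_BUFFERS_PER_Q);
+	if (rc)
+		return rc;
+	/* clear the buffers before re-using an established queue */
+	qdio_reset_buffers(example_bufs, QDIO_MAX_BUFFERS_PER_Q);
+	qdio_free_buffers(example_bufs, QDIO_MAX_BUFFERS_PER_Q);
+	return 0;
+}
+#endif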
+
+/*
+ * QEBSM availability is indicated by bit 58 of the CSS general
+ * characteristics; it can only be used if the hardware reports it there.
+ */
+static inline int qebsm_possible(void)
+{
+ return css_general_characteristics.qebsm;
+}
+
+/*
+ * qib_param_field: pointer to 128 bytes, or NULL if there is no param field
+ * input/output_slib_elements: pointer to nr_queues * 128 words of data,
+ * or NULL
+ */
+static void set_impl_params(struct qdio_irq *irq_ptr,
+ unsigned int qib_param_field_format,
+ unsigned char *qib_param_field,
+ unsigned long *input_slib_elements,
+ unsigned long *output_slib_elements)
+{
+ struct qdio_q *q;
+ int i, j;
+
+ if (!irq_ptr)
+ return;
+
+ irq_ptr->qib.pfmt = qib_param_field_format;
+ if (qib_param_field)
+ memcpy(irq_ptr->qib.parm, qib_param_field,
+ QDIO_MAX_BUFFERS_PER_Q);
+
+ if (!input_slib_elements)
+ goto output;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->slib->slibe[j].parms =
+ input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+ }
+output:
+ if (!output_slib_elements)
+ return;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->slib->slibe[j].parms =
+ output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+ }
+}
+
+static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+{
+ struct qdio_q *q;
+ int i;
+
+ for (i = 0; i < nr_queues; i++) {
+ q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
+ if (!q->slib) {
+ kmem_cache_free(qdio_q_cache, q);
+ return -ENOMEM;
+ }
+ irq_ptr_qs[i] = q;
+ }
+ return 0;
+}
+
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
+{
+ int rc;
+
+ rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
+ if (rc)
+ return rc;
+ rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
+ return rc;
+}
+
+static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ qdio_handler_t *handler, int i)
+{
+ struct slib *slib = q->slib;
+
+ /* queue must be cleared for qdio_establish */
+ memset(q, 0, sizeof(*q));
+ memset(slib, 0, PAGE_SIZE);
+ q->slib = slib;
+ q->irq_ptr = irq_ptr;
+ q->mask = 1 << (31 - i);
+ q->nr = i;
+ q->handler = handler;
+}
+
+static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ void **sbals_array, int i)
+{
+ struct qdio_q *prev;
+ int j;
+
+ DBF_HEX(&q, sizeof(void *));
+ q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
+
+ /* fill in sbal */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->sbal[j] = *sbals_array++;
+
+ /* fill in slib */
+ if (i > 0) {
+ prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
+ : irq_ptr->output_qs[i - 1];
+ prev->slib->nsliba = (unsigned long)q->slib;
+ }
+
+ q->slib->sla = (unsigned long)q->sl;
+ q->slib->slsba = (unsigned long)&q->slsb.val[0];
+
+ /* fill in sl */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->sl->element[j].sbal = (unsigned long)q->sbal[j];
+}
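+
+/*
+ * Resulting per-queue page layout (the slib page was allocated in
+ * __qdio_allocate_qs(); the sl is placed in its second half):
+ *
+ *	q->slib ->	+---------------------------------+  offset 0
+ *			| struct slib: nsliba, sla, slsba, |
+ *			| slibe[QDIO_MAX_BUFFERS_PER_Q]    |
+ *	q->sl   ->	+---------------------------------+  offset PAGE_SIZE / 2
+ *			| struct sl:                       |
+ *			| element[QDIO_MAX_BUFFERS_PER_Q]  |
+ *			+---------------------------------+
+ *
+ * Each sl->element[j].sbal points to one SBAL handed in via sbals_array,
+ * and nsliba links each slib to the slib of the following queue.
+ */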
+
+static void setup_queues(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *qdio_init)
+{
+ struct qdio_q *q;
+ void **input_sbal_array = qdio_init->input_sbal_addr_array;
+ void **output_sbal_array = qdio_init->output_sbal_addr_array;
+ struct qdio_outbuf_state *output_sbal_state_array =
+ qdio_init->output_sbal_state_array;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ DBF_EVENT("inq:%1d", i);
+ setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
+
+ q->is_input_q = 1;
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+ qdio_init->queue_start_poll_array[i] : NULL;
+
+ setup_storage_lists(q, irq_ptr, input_sbal_array, i);
+ input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+ if (is_thinint_irq(irq_ptr)) {
+ tasklet_init(&q->tasklet, tiqdio_inbound_processing,
+ (unsigned long) q);
+ } else {
+ tasklet_init(&q->tasklet, qdio_inbound_processing,
+ (unsigned long) q);
+ }
+ }
+
+ for_each_output_queue(irq_ptr, q, i) {
+ DBF_EVENT("outq:%1d", i);
+ setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+
+ q->u.out.sbal_state = output_sbal_state_array;
+ output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
+ q->is_input_q = 0;
+ q->u.out.scan_threshold = qdio_init->scan_threshold;
+ setup_storage_lists(q, irq_ptr, output_sbal_array, i);
+ output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+ tasklet_init(&q->tasklet, qdio_outbound_processing,
+ (unsigned long) q);
+ setup_timer(&q->u.out.timer, (void(*)(unsigned long))
+ &qdio_outbound_timer, (unsigned long)q);
+ }
+}
+
+static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
+{
+ if (qdioac & AC1_SIGA_INPUT_NEEDED)
+ irq_ptr->siga_flag.input = 1;
+ if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
+ irq_ptr->siga_flag.output = 1;
+ if (qdioac & AC1_SIGA_SYNC_NEEDED)
+ irq_ptr->siga_flag.sync = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+ irq_ptr->siga_flag.sync_after_ai = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+ irq_ptr->siga_flag.sync_out_after_pci = 1;
+}
+
+static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
+ unsigned char qdioac, unsigned long token)
+{
+ if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
+ goto no_qebsm;
+ if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
+ (!(qdioac & AC1_SC_QEBSM_ENABLED)))
+ goto no_qebsm;
+
+ irq_ptr->sch_token = token;
+
+ DBF_EVENT("V=V:1");
+ DBF_EVENT("%8lx", irq_ptr->sch_token);
+ return;
+
+no_qebsm:
+ irq_ptr->sch_token = 0;
+ irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+ DBF_EVENT("noV=V");
+}
+
+/*
+ * If there is a qdio_irq we use its chsc_page for the chsc call, otherwise
+ * we allocate a page; in both cases the result is copied to the specified
+ * structure.
+ */
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+ struct subchannel_id *schid,
+ struct qdio_ssqd_desc *data)
+{
+ struct chsc_ssqd_area *ssqd;
+ int rc;
+
+ DBF_EVENT("getssqd:%4x", schid->sch_no);
+ if (!irq_ptr) {
+ ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
+ if (!ssqd)
+ return -ENOMEM;
+ } else {
+ ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+ }
+
+ rc = chsc_ssqd(*schid, ssqd);
+ if (rc)
+ goto out;
+
+ if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+ !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
+ (ssqd->qdio_ssqd.sch != schid->sch_no))
+ rc = -EINVAL;
+
+ if (!rc)
+ memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
+
+out:
+ if (!irq_ptr)
+ free_page((unsigned long)ssqd);
+
+ return rc;
+}
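+
+/*
+ * When called without a qdio_irq (e.g. before the IRQ is set up), the
+ * function above allocates a temporary chsc page itself. A minimal sketch,
+ * assuming the caller already holds a subchannel id (function name is
+ * hypothetical):
+ */
+#if 0	/* example only, not built */
+static int example_query_ssqd(struct subchannel_id schid)
+{
+	struct qdio_ssqd_desc ssqd;
+	int rc;
+
+	rc = qdio_setup_get_ssqd(NULL, &schid, &ssqd);
+	if (rc)
+		return rc;
+	/* ... evaluate ssqd.qdioac1, ssqd.sch_token etc. ... */
+	return 0;
+}
+#endif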
+
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
+{
+ unsigned char qdioac;
+ int rc;
+
+ rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
+ if (rc) {
+ DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%x", rc);
+ /* all flags set, worst case */
+ qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
+ AC1_SIGA_SYNC_NEEDED;
+ } else
+ qdioac = irq_ptr->ssqd_desc.qdioac1;
+
+ check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
+ process_ac_flags(irq_ptr, qdioac);
+ DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
+ DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
+}
+
+void qdio_release_memory(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ /*
+	 * Must check queue array manually since irq_ptr->nr_input_qs /
+	 * irq_ptr->nr_output_qs may not yet be set.
+ */
+ for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ q = irq_ptr->input_qs[i];
+ if (q) {
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+ }
+ for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ q = irq_ptr->output_qs[i];
+ if (q) {
+ if (q->u.out.use_cq) {
+ int n;
+
+ for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+ struct qaob *aob = q->u.out.aobs[n];
+ if (aob) {
+ qdio_release_aob(aob);
+ q->u.out.aobs[n] = NULL;
+ }
+ }
+
+ qdio_disable_async_operation(&q->u.out);
+ }
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+ }
+ free_page((unsigned long) irq_ptr->qdr);
+ free_page(irq_ptr->chsc_page);
+ free_page((unsigned long) irq_ptr);
+}
+
+static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
+ struct qdio_q **irq_ptr_qs,
+ int i, int nr)
+{
+ irq_ptr->qdr->qdf0[i + nr].sliba =
+ (unsigned long)irq_ptr_qs[i]->slib;
+
+ irq_ptr->qdr->qdf0[i + nr].sla =
+ (unsigned long)irq_ptr_qs[i]->sl;
+
+ irq_ptr->qdr->qdf0[i + nr].slsba =
+ (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
+
+ irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
+ irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
+}
+
+static void setup_qdr(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *qdio_init)
+{
+ int i;
+
+ irq_ptr->qdr->qfmt = qdio_init->q_format;
+ irq_ptr->qdr->ac = qdio_init->qdr_ac;
+ irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
+ irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
+ irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
+ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
+ irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
+ irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
+
+ for (i = 0; i < qdio_init->no_input_qs; i++)
+ __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
+
+ for (i = 0; i < qdio_init->no_output_qs; i++)
+ __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
+ qdio_init->no_input_qs);
+}
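+
+/*
+ * qdf0[] thus holds one descriptor per queue: input queues occupy slots
+ * 0 .. no_input_qs - 1, output queues follow at offset no_input_qs, which
+ * is why the output loop passes qdio_init->no_input_qs as the nr argument.
+ */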
+
+static void setup_qib(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *init_data)
+{
+ if (qebsm_possible())
+ irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
+ irq_ptr->qib.rflags |= init_data->qib_rflags;
+
+ irq_ptr->qib.qfmt = init_data->q_format;
+ if (init_data->no_input_qs)
+ irq_ptr->qib.isliba =
+ (unsigned long)(irq_ptr->input_qs[0]->slib);
+ if (init_data->no_output_qs)
+ irq_ptr->qib.osliba =
+ (unsigned long)(irq_ptr->output_qs[0]->slib);
+ memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
+}
+
+int qdio_setup_irq(struct qdio_initialize *init_data)
+{
+ struct ciw *ciw;
+ struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
+ int rc;
+
+ memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+ memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+ memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+ memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+ irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
+ irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+
+ /* wipes qib.ac, required by ar7063 */
+ memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
+ irq_ptr->int_parm = init_data->int_parm;
+ irq_ptr->nr_input_qs = init_data->no_input_qs;
+ irq_ptr->nr_output_qs = init_data->no_output_qs;
+ irq_ptr->cdev = init_data->cdev;
+ ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
+ setup_queues(irq_ptr, init_data);
+
+ setup_qib(irq_ptr, init_data);
+ qdio_setup_thinint(irq_ptr);
+ set_impl_params(irq_ptr, init_data->qib_param_field_format,
+ init_data->qib_param_field,
+ init_data->input_slib_elements,
+ init_data->output_slib_elements);
+
+ /* fill input and output descriptors */
+ setup_qdr(irq_ptr, init_data);
+
+ /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+
+ /* get qdio commands */
+ ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+ if (!ciw) {
+ DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
+ rc = -EINVAL;
+ goto out_err;
+ }
+ irq_ptr->equeue = *ciw;
+
+ ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+ if (!ciw) {
+ DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
+ rc = -EINVAL;
+ goto out_err;
+ }
+ irq_ptr->aqueue = *ciw;
+
+ /* set new interrupt handler */
+ irq_ptr->orig_handler = init_data->cdev->handler;
+ init_data->cdev->handler = qdio_int_handler;
+ return 0;
+out_err:
+ qdio_release_memory(irq_ptr);
+ return rc;
+}
+
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev)
+{
+ char s[80];
+
+	snprintf(s, sizeof(s), "qdio: %s %s on SC %x using "
+ "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
+ dev_name(&cdev->dev),
+ (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
+ ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
+ irq_ptr->schid.sch_no,
+ is_thinint_irq(irq_ptr),
+ (irq_ptr->sch_token) ? 1 : 0,
+ (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
+ css_general_characteristics.aif_tdd,
+ (irq_ptr->siga_flag.input) ? "R" : " ",
+ (irq_ptr->siga_flag.output) ? "W" : " ",
+ (irq_ptr->siga_flag.sync) ? "S" : " ",
+ (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+ (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
+ printk(KERN_INFO "%s", s);
+}
+
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+	outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
+			     GFP_ATOMIC);
+ if (!outq->aobs) {
+ outq->use_cq = 0;
+ return -ENOMEM;
+ }
+ outq->use_cq = 1;
+ return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+ kfree(q->aobs);
+ q->aobs = NULL;
+ q->use_cq = 0;
+}
+
+int __init qdio_setup_init(void)
+{
+ int rc;
+
+ qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+ 256, 0, NULL);
+ if (!qdio_q_cache)
+ return -ENOMEM;
+
+ qdio_aob_cache = kmem_cache_create("qdio_aob",
+ sizeof(struct qaob),
+ sizeof(struct qaob),
+ 0,
+ NULL);
+ if (!qdio_aob_cache) {
+ rc = -ENOMEM;
+ goto free_qdio_q_cache;
+ }
+
+ /* Check for OSA/FCP thin interrupts (bit 67). */
+ DBF_EVENT("thinint:%1d",
+ (css_general_characteristics.aif_osa) ? 1 : 0);
+
+ /* Check for QEBSM support in general (bit 58). */
+ DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+ rc = 0;
+out:
+ return rc;
+free_qdio_q_cache:
+ kmem_cache_destroy(qdio_q_cache);
+ goto out;
+}
+
+void qdio_setup_exit(void)
+{
+ kmem_cache_destroy(qdio_aob_cache);
+ kmem_cache_destroy(qdio_q_cache);
+}
diff --git a/kernel/drivers/s390/cio/qdio_thinint.c b/kernel/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 000000000..5d06253c2
--- /dev/null
+++ b/kernel/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "ioasm.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+/*
+ * Restriction: only the first 63 iqdio subchannels get their own indicator;
+ * all subsequent subchannels share one indicator.
+ */
+#define TIQDIO_NR_NONSHARED_IND 63
+#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
+#define TIQDIO_SHARED_IND 63
+
+/* device state change indicators */
+struct indicator_t {
+ u32 ind; /* u32 because of compare-and-swap performance */
+ atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+static DEFINE_MUTEX(tiq_list_lock);
+
+/* Adapter interrupt definitions */
+static void tiqdio_thinint_handler(struct airq_struct *airq);
+
+static struct airq_struct tiqdio_airq = {
+ .handler = tiqdio_thinint_handler,
+ .isc = QDIO_AIRQ_ISC,
+};
+
+static struct indicator_t *q_indicators;
+
+u64 last_ai_time;
+
+/* returns addr for the device state change indicator */
+static u32 *get_indicator(void)
+{
+ int i;
+
+ for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
+ if (!atomic_read(&q_indicators[i].count)) {
+ atomic_set(&q_indicators[i].count, 1);
+ return &q_indicators[i].ind;
+ }
+
+ /* use the shared indicator */
+ atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
+ return &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static void put_indicator(u32 *addr)
+{
+ int i;
+
+ if (!addr)
+ return;
+ i = ((unsigned long)addr - (unsigned long)q_indicators) /
+ sizeof(struct indicator_t);
+ atomic_dec(&q_indicators[i].count);
+}
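+
+/*
+ * get_indicator()/put_indicator() implement a small refcounted pool: the
+ * first TIQDIO_NR_NONSHARED_IND users each get a private indicator
+ * (count 0 -> 1); every further user falls back to the shared slot, whose
+ * count may exceed 1. put_indicator() recovers the slot index from the
+ * address, so it must only be passed addresses previously returned by
+ * get_indicator().
+ */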
+
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+{
+ mutex_lock(&tiq_list_lock);
+ list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+ mutex_unlock(&tiq_list_lock);
+ xchg(irq_ptr->dsci, 1 << 7);
+}
+
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+
+ q = irq_ptr->input_qs[0];
+ /* if establish triggered an error */
+ if (!q || !q->entry.prev || !q->entry.next)
+ return;
+
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
+}
+
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+{
+ return irq_ptr->nr_input_qs > 1;
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
+{
+ return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+ return references_shared_dsci(irq_ptr) ||
+ has_multiple_inq_on_dsci(irq_ptr);
+}
+
+void clear_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+ if (shared_ind(irq_ptr))
+ return;
+ xchg(irq_ptr->dsci, 0);
+}
+
+int test_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+ if (shared_ind(irq_ptr))
+ return 0;
+	return *irq_ptr->dsci ? 1 : 0;
+}
+
+static inline u32 clear_shared_ind(void)
+{
+ if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
+ return 0;
+ return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
+}
+
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq, q, i) {
+ if (!references_shared_dsci(irq) &&
+ has_multiple_inq_on_dsci(irq))
+ xchg(q->irq_ptr->dsci, 0);
+
+ if (q->u.in.queue_start_poll) {
+ /* skip if polling is enabled or already in work */
+ if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+ &q->u.in.queue_irq_state)) {
+ qperf_inc(q, int_discarded);
+ continue;
+ }
+
+ /* avoid dsci clear here, done after processing */
+ q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+ q->irq_ptr->int_parm);
+ } else {
+ if (!shared_ind(q->irq_ptr))
+ xchg(q->irq_ptr->dsci, 0);
+
+ /*
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
+ */
+ tasklet_schedule(&q->tasklet);
+ }
+ }
+}
+
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @airq: pointer to adapter interrupt descriptor
+ */
+static void tiqdio_thinint_handler(struct airq_struct *airq)
+{
+ u32 si_used = clear_shared_ind();
+ struct qdio_q *q;
+
+ last_ai_time = S390_lowcore.int_clock;
+ inc_irq_stat(IRQIO_QAI);
+
+ /* protect tiq_list entries, only changed in activate or shutdown */
+ rcu_read_lock();
+
+ /* check for work on all inbound thinint queues */
+ list_for_each_entry_rcu(q, &tiq_list, entry) {
+ struct qdio_irq *irq;
+
+ /* only process queues from changed sets */
+ irq = q->irq_ptr;
+ if (unlikely(references_shared_dsci(irq))) {
+ if (!si_used)
+ continue;
+ } else if (!*irq->dsci)
+ continue;
+
+ tiqdio_call_inq_handlers(irq);
+
+ qperf_inc(q, adapter_int);
+ }
+ rcu_read_unlock();
+}
+
+static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
+{
+ struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
+ u64 summary_indicator_addr, subchannel_indicator_addr;
+ int rc;
+
+ if (reset) {
+ summary_indicator_addr = 0;
+ subchannel_indicator_addr = 0;
+ } else {
+ summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
+ subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
+ }
+
+ rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+ subchannel_indicator_addr);
+ if (rc) {
+ DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
+ scssc->response.code);
+ goto out;
+ }
+
+ DBF_EVENT("setscind");
+ DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
+ DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
+out:
+ return rc;
+}
+
+/* allocate non-shared indicators and shared indicator */
+int __init tiqdio_allocate_memory(void)
+{
+	q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t),
+			       GFP_KERNEL);
+ if (!q_indicators)
+ return -ENOMEM;
+ return 0;
+}
+
+void tiqdio_free_memory(void)
+{
+ kfree(q_indicators);
+}
+
+int __init tiqdio_register_thinints(void)
+{
+ int rc;
+
+ rc = register_adapter_interrupt(&tiqdio_airq);
+ if (rc) {
+ DBF_EVENT("RTI:%x", rc);
+ return rc;
+ }
+ return 0;
+}
+
+int qdio_establish_thinint(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+ return set_subchannel_ind(irq_ptr, 0);
+}
+
+void qdio_setup_thinint(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+ irq_ptr->dsci = get_indicator();
+ DBF_HEX(&irq_ptr->dsci, sizeof(void *));
+}
+
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+
+ /* reset adapter interrupt indicators */
+ set_subchannel_ind(irq_ptr, 1);
+ put_indicator(irq_ptr->dsci);
+}
+
+void __exit tiqdio_unregister_thinints(void)
+{
+ WARN_ON(!list_empty(&tiq_list));
+ unregister_adapter_interrupt(&tiqdio_airq);
+}
diff --git a/kernel/drivers/s390/cio/scm.c b/kernel/drivers/s390/cio/scm.c
new file mode 100644
index 000000000..15268edc5
--- /dev/null
+++ b/kernel/drivers/s390/cio/scm.c
@@ -0,0 +1,288 @@
+/*
+ * Recognize and maintain s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/eadm.h>
+#include "chsc.h"
+
+static struct device *scm_root;
+
+#define to_scm_dev(n) container_of(n, struct scm_device, dev)
+#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
+
+static int scmdev_probe(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
+}
+
+static int scmdev_remove(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+}
+
+static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=scm:scmdev");
+}
+
+static struct bus_type scm_bus_type = {
+ .name = "scm",
+ .probe = scmdev_probe,
+ .remove = scmdev_remove,
+ .uevent = scmdev_uevent,
+};
+
+/**
+ * scm_driver_register() - register a scm driver
+ * @scmdrv: driver to be registered
+ */
+int scm_driver_register(struct scm_driver *scmdrv)
+{
+ struct device_driver *drv = &scmdrv->drv;
+
+ drv->bus = &scm_bus_type;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_register);
+
+/**
+ * scm_driver_unregister() - deregister a scm driver
+ * @scmdrv: driver to be deregistered
+ */
+void scm_driver_unregister(struct scm_driver *scmdrv)
+{
+ driver_unregister(&scmdrv->drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_unregister);
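+
+/*
+ * A minimal sketch of a storage class memory driver built on the two
+ * calls above. The driver name, callbacks and init/exit hooks are
+ * hypothetical; only struct scm_driver and the register/unregister
+ * functions come from this bus code.
+ */
+#if 0	/* example only, not built */
+static int example_probe(struct scm_device *scmdev)
+{
+	return 0;	/* claim the device */
+}
+
+static int example_remove(struct scm_device *scmdev)
+{
+	return 0;
+}
+
+static struct scm_driver example_scm_driver = {
+	.drv = {
+		.name	= "example_scm",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= example_probe,
+	.remove	= example_remove,
+};
+
+static int __init example_init(void)
+{
+	return scm_driver_register(&example_scm_driver);
+}
+
+static void __exit example_exit(void)
+{
+	scm_driver_unregister(&example_scm_driver);
+}
+#endif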
+
+void scm_irq_handler(struct aob *aob, int error)
+{
+ struct aob_rq_header *aobrq = (void *) aob->request.data;
+ struct scm_device *scmdev = aobrq->scmdev;
+ struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
+
+ scmdrv->handler(scmdev, aobrq->data, error);
+}
+EXPORT_SYMBOL_GPL(scm_irq_handler);
+
+#define scm_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scm_device *scmdev = to_scm_dev(dev); \
+ int ret; \
+ \
+ device_lock(dev); \
+ ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
+ device_unlock(dev); \
+ \
+ return ret; \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+scm_attr(persistence);
+scm_attr(oper_state);
+scm_attr(data_state);
+scm_attr(rank);
+scm_attr(release);
+scm_attr(res_id);
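+
+/*
+ * For reference, scm_attr(rank) above expands to a show_rank() function
+ * that prints scmdev->attrs.rank under the device lock plus a read-only
+ * DEVICE_ATTR(rank, S_IRUGO, show_rank, NULL); the other five attributes
+ * follow the same pattern.
+ */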
+
+static struct attribute *scmdev_attrs[] = {
+ &dev_attr_persistence.attr,
+ &dev_attr_oper_state.attr,
+ &dev_attr_data_state.attr,
+ &dev_attr_rank.attr,
+ &dev_attr_release.attr,
+ &dev_attr_res_id.attr,
+ NULL,
+};
+
+static struct attribute_group scmdev_attr_group = {
+ .attrs = scmdev_attrs,
+};
+
+static const struct attribute_group *scmdev_attr_groups[] = {
+ &scmdev_attr_group,
+ NULL,
+};
+
+static void scmdev_release(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ kfree(scmdev);
+}
+
+static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
+ unsigned int size, unsigned int max_blk_count)
+{
+ dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
+ scmdev->nr_max_block = max_blk_count;
+ scmdev->address = sale->sa;
+ scmdev->size = 1UL << size;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.persistence = sale->p;
+ scmdev->attrs.oper_state = sale->op_state;
+ scmdev->attrs.data_state = sale->data_state;
+ scmdev->attrs.release = sale->r;
+ scmdev->attrs.res_id = sale->rid;
+ scmdev->dev.parent = scm_root;
+ scmdev->dev.bus = &scm_bus_type;
+ scmdev->dev.release = scmdev_release;
+ scmdev->dev.groups = scmdev_attr_groups;
+}
+
+/*
+ * Check for state-changes, notify the driver and userspace.
+ */
+static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
+{
+ struct scm_driver *scmdrv;
+ bool changed;
+
+ device_lock(&scmdev->dev);
+ changed = scmdev->attrs.rank != sale->rank ||
+ scmdev->attrs.oper_state != sale->op_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.oper_state = sale->op_state;
+ if (!scmdev->dev.driver)
+ goto out;
+ scmdrv = to_scm_drv(scmdev->dev.driver);
+ if (changed && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_CHANGE);
+out:
+ device_unlock(&scmdev->dev);
+ if (changed)
+ kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int check_address(struct device *dev, void *data)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct sale *sale = data;
+
+ return scmdev->address == sale->sa;
+}
+
+static struct scm_device *scmdev_find(struct sale *sale)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
+
+ return dev ? to_scm_dev(dev) : NULL;
+}
+
+static int scm_add(struct chsc_scm_info *scm_info, size_t num)
+{
+ struct sale *sale, *scmal = scm_info->scmal;
+ struct scm_device *scmdev;
+ int ret;
+
+ for (sale = scmal; sale < scmal + num; sale++) {
+ scmdev = scmdev_find(sale);
+ if (scmdev) {
+ scmdev_update(scmdev, sale);
+			/* Release reference from scmdev_find(). */
+ put_device(&scmdev->dev);
+ continue;
+ }
+ scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
+ if (!scmdev)
+			return -ENOMEM;
+ scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
+ ret = device_register(&scmdev->dev);
+ if (ret) {
+ /* Release reference from device_initialize(). */
+ put_device(&scmdev->dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int scm_update_information(void)
+{
+ struct chsc_scm_info *scm_info;
+ u64 token = 0;
+ size_t num;
+ int ret;
+
+ scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!scm_info)
+ return -ENOMEM;
+
+ do {
+ ret = chsc_scm_info(scm_info, token);
+ if (ret)
+ break;
+
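+		/*
+		 * The response embeds a variable number of sale entries
+		 * after the fixed header: subtract the offset of scmal[]
+		 * (relative to the start of the response block) from the
+		 * response length, then divide by the entry size.
+		 */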
+ num = (scm_info->response.length -
+ (offsetof(struct chsc_scm_info, scmal) -
+ offsetof(struct chsc_scm_info, response))
+ ) / sizeof(struct sale);
+
+ ret = scm_add(scm_info, num);
+ if (ret)
+ break;
+
+ token = scm_info->restok;
+ } while (token);
+
+ free_page((unsigned long)scm_info);
+
+ return ret;
+}
+
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ if (dev->driver && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_AVAIL);
+
+ return 0;
+}
+
+int scm_process_availability_information(void)
+{
+ return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
+static int __init scm_init(void)
+{
+ int ret;
+
+ ret = bus_register(&scm_bus_type);
+ if (ret)
+ return ret;
+
+ scm_root = root_device_register("scm");
+ if (IS_ERR(scm_root)) {
+ bus_unregister(&scm_bus_type);
+ return PTR_ERR(scm_root);
+ }
+
+ scm_update_information();
+ return 0;
+}
+subsys_initcall_sync(scm_init);