Diffstat (limited to 'kernel/drivers/staging/unisys/uislib')
-rw-r--r--  kernel/drivers/staging/unisys/uislib/Kconfig     |   10
-rw-r--r--  kernel/drivers/staging/unisys/uislib/Makefile    |   12
-rw-r--r--  kernel/drivers/staging/unisys/uislib/uislib.c    | 1372
-rw-r--r--  kernel/drivers/staging/unisys/uislib/uisqueue.c  |  322
-rw-r--r--  kernel/drivers/staging/unisys/uislib/uisthread.c |   69
-rw-r--r--  kernel/drivers/staging/unisys/uislib/uisutils.c  |  137
6 files changed, 1922 insertions(+), 0 deletions(-)
diff --git a/kernel/drivers/staging/unisys/uislib/Kconfig b/kernel/drivers/staging/unisys/uislib/Kconfig
new file mode 100644
index 000000000..c39a0a21a
--- /dev/null
+++ b/kernel/drivers/staging/unisys/uislib/Kconfig
@@ -0,0 +1,10 @@
+#
+# Unisys uislib configuration
+#
+
+config UNISYS_UISLIB
+ tristate "Unisys uislib driver"
+ select UNISYS_VISORCHIPSET
+ ---help---
+ If you say Y here, you will enable the Unisys uislib driver.
+
diff --git a/kernel/drivers/staging/unisys/uislib/Makefile b/kernel/drivers/staging/unisys/uislib/Makefile
new file mode 100644
index 000000000..860f494f1
--- /dev/null
+++ b/kernel/drivers/staging/unisys/uislib/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for Unisys uislib
+#
+
+obj-$(CONFIG_UNISYS_UISLIB) += visoruislib.o
+
+visoruislib-y := uislib.o uisqueue.o uisthread.o uisutils.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+ccflags-y += -Idrivers/staging/unisys/visorchipset
+ccflags-y += -Idrivers/staging/unisys/common-spar/include
+ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels
diff --git a/kernel/drivers/staging/unisys/uislib/uislib.c b/kernel/drivers/staging/unisys/uislib/uislib.c
new file mode 100644
index 000000000..f93d0bb11
--- /dev/null
+++ b/kernel/drivers/staging/unisys/uislib/uislib.c
@@ -0,0 +1,1372 @@
+/* uislib.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#define EXPORT_SYMTAB
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#ifdef CONFIG_MODVERSIONS
+#include <config/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+#include <linux/version.h>
+#include "diagnostics/appos_subsystems.h"
+#include "uisutils.h"
+#include "vbuschannel.h"
+
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h> /* for copy_from_user */
+#include <linux/ctype.h> /* for toupper */
+#include <linux/list.h>
+
+#include "sparstop.h"
+#include "visorchipset.h"
+#include "version.h"
+#include "guestlinuxdebug.h"
+
+#define SET_PROC_OWNER(x, y)
+
+#define POLLJIFFIES_NORMAL 1
+/* Choose whether or not you want to wakeup the request-polling thread
+ * after an IO termination.
+ */
+
+/* This is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages.
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uislib_c
+#define __MYFILE__ "uislib.c"
+
+/* global function pointers that act as callback functions into virtpcimod */
+int (*virt_control_chan_func)(struct guest_msgs *);
+
+static int debug_buf_valid;
+static char *debug_buf;	/* Note this MUST be global, because the
+				 * contents must persist across multiple
+				 * reads of the debugfs "info" entry
+				 */
+static unsigned int chipset_inited;
+
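+/* Spin, yielding the CPU via UIS_THREAD_WAIT on each pass, until the
+ * given callback pointer has been registered (becomes non-NULL).
+ */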
+#define WAIT_ON_CALLBACK(handle) \
+ do { \
+ if (handle) \
+ break; \
+ UIS_THREAD_WAIT; \
+ } while (1)
+
+static struct bus_info *bus_list;
+static rwlock_t bus_list_lock;
+static int bus_list_count; /* number of buses in the list */
+static int max_bus_count; /* maximum number of buses expected */
+static u64 phys_data_chan;
+static int platform_no;
+
+static struct uisthread_info incoming_ti;
+static BOOL incoming_started = FALSE;
+static LIST_HEAD(poll_dev_chan);
+static unsigned long long tot_moved_to_tail_cnt;
+static unsigned long long tot_wait_cnt;
+static unsigned long long tot_wakeup_cnt;
+static unsigned long long tot_schedule_cnt;
+static int en_smart_wakeup = 1;
+static DEFINE_SEMAPHORE(poll_dev_lock); /* unlocked */
+static DECLARE_WAIT_QUEUE_HEAD(poll_dev_wake_q);
+static int poll_dev_start;
+
+#define CALLHOME_PROC_ENTRY_FN "callhome"
+#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled"
+
+#define DIR_DEBUGFS_ENTRY "uislib"
+static struct dentry *dir_debugfs;
+
+#define PLATFORMNUMBER_DEBUGFS_ENTRY_FN "platform"
+static struct dentry *platformnumber_debugfs_read;
+
+#define CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN "cycles_before_wait"
+static struct dentry *cycles_before_wait_debugfs_read;
+
+#define SMART_WAKEUP_DEBUGFS_ENTRY_FN "smart_wakeup"
+static struct dentry *smart_wakeup_debugfs_entry;
+
+#define INFO_DEBUGFS_ENTRY_FN "info"
+static struct dentry *info_debugfs_entry;
+
+static unsigned long long cycles_before_wait, wait_cycles;
+
+/*****************************************************/
+/* local functions */
+/*****************************************************/
+
+static ssize_t info_debugfs_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset);
+static const struct file_operations debugfs_info_fops = {
+ .read = info_debugfs_read,
+};
+
+static void
+init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr)
+{
+ memset(msg, 0, sizeof(struct controlvm_message));
+ msg->hdr.id = id;
+ msg->hdr.flags.response_expected = rsp;
+ msg->hdr.flags.server = svr;
+}
+
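+/* Map a vbus channel into kernel virtual space and sanity-check its
+ * client signature; returns the mapped channel, or NULL (with the
+ * mapping released) on failure.
+ */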
+static __iomem void *init_vbus_channel(u64 ch_addr, u32 ch_bytes)
+{
+ void __iomem *ch = uislib_ioremap_cache(ch_addr, ch_bytes);
+
+ if (!ch)
+ return NULL;
+
+ if (!SPAR_VBUS_CHANNEL_OK_CLIENT(ch)) {
+ uislib_iounmap(ch);
+ return NULL;
+ }
+ return ch;
+}
+
+static int
+create_bus(struct controlvm_message *msg, char *buf)
+{
+ u32 bus_no, dev_count;
+ struct bus_info *tmp, *bus;
+ size_t size;
+
+ if (max_bus_count == bus_list_count) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, max_bus_count,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_MAX_BUSES;
+ }
+
+ bus_no = msg->cmd.create_bus.bus_no;
+ dev_count = msg->cmd.create_bus.dev_count;
+
+ POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, bus_no, dev_count,
+ POSTCODE_SEVERITY_INFO);
+
+ size =
+ sizeof(struct bus_info) +
+ (dev_count * sizeof(struct device_info *));
+ bus = kzalloc(size, GFP_ATOMIC);
+ if (!bus) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ }
+
+ /* Currently by default, the bus Number is the GuestHandle.
+ * Configure Bus message can override this.
+ */
+ if (msg->hdr.flags.test_message) {
+ /* This implies we're the IOVM so set guest handle to 0... */
+ bus->guest_handle = 0;
+ bus->bus_no = bus_no;
+ bus->local_vnic = 1;
+ } else {
+ bus->bus_no = bus_no;
+ bus->guest_handle = bus_no;
+ }
+ sprintf(bus->name, "%d", (int)bus->bus_no);
+ bus->device_count = dev_count;
+ bus->device =
+ (struct device_info **)((char *)bus + sizeof(struct bus_info));
+ bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid;
+ bus->bus_channel_bytes = 0;
+ bus->bus_channel = NULL;
+
+ /* add bus to our bus list - but check for duplicates first */
+ read_lock(&bus_list_lock);
+ for (tmp = bus_list; tmp; tmp = tmp->next) {
+ if (tmp->bus_no == bus->bus_no)
+ break;
+ }
+ read_unlock(&bus_list_lock);
+ if (tmp) {
+ /* found a bus already in the list with same bus_no -
+ * reject add
+ */
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
+ POSTCODE_SEVERITY_ERR);
+ kfree(bus);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ if ((msg->cmd.create_bus.channel_addr != 0) &&
+ (msg->cmd.create_bus.channel_bytes != 0)) {
+ bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes;
+ bus->bus_channel =
+ init_vbus_channel(msg->cmd.create_bus.channel_addr,
+ msg->cmd.create_bus.channel_bytes);
+ }
+ /* the msg is bound for virtpci; send guest_msgs struct to callback */
+ if (!msg->hdr.flags.server) {
+ struct guest_msgs cmd;
+
+ cmd.msgtype = GUEST_ADD_VBUS;
+ cmd.add_vbus.bus_no = bus_no;
+ cmd.add_vbus.chanptr = bus->bus_channel;
+ cmd.add_vbus.dev_count = dev_count;
+ cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid;
+ cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid;
+ if (!virt_control_chan_func) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
+ POSTCODE_SEVERITY_ERR);
+ kfree(bus);
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+ if (!virt_control_chan_func(&cmd)) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
+ POSTCODE_SEVERITY_ERR);
+ kfree(bus);
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ }
+
+ /* add bus at the head of our list */
+ write_lock(&bus_list_lock);
+ if (!bus_list) {
+ bus_list = bus;
+ } else {
+ bus->next = bus_list;
+ bus_list = bus;
+ }
+ bus_list_count++;
+ write_unlock(&bus_list_lock);
+
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no,
+ POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int
+destroy_bus(struct controlvm_message *msg, char *buf)
+{
+ int i;
+ struct bus_info *bus, *prev = NULL;
+ struct guest_msgs cmd;
+ u32 bus_no;
+
+ bus_no = msg->cmd.destroy_bus.bus_no;
+
+ read_lock(&bus_list_lock);
+
+ bus = bus_list;
+ while (bus) {
+ if (bus->bus_no == bus_no)
+ break;
+ prev = bus;
+ bus = bus->next;
+ }
+
+ if (!bus) {
+ read_unlock(&bus_list_lock);
+ return CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+
+ /* verify that this bus has no devices. */
+ for (i = 0; i < bus->device_count; i++) {
+ if (bus->device[i]) {
+ read_unlock(&bus_list_lock);
+ return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED;
+ }
+ }
+ read_unlock(&bus_list_lock);
+
+ if (msg->hdr.flags.server)
+ goto remove;
+
+ /* client messages require us to call the virtpci callback associated
+ with this bus. */
+ cmd.msgtype = GUEST_DEL_VBUS;
+ cmd.del_vbus.bus_no = bus_no;
+ if (!virt_control_chan_func)
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+
+ if (!virt_control_chan_func(&cmd))
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+
+ /* finally, remove the bus from the list */
+remove:
+ write_lock(&bus_list_lock);
+ if (prev) /* not at head */
+ prev->next = bus->next;
+ else
+ bus_list = bus->next;
+ bus_list_count--;
+ write_unlock(&bus_list_lock);
+
+ if (bus->bus_channel) {
+ uislib_iounmap(bus->bus_channel);
+ bus->bus_channel = NULL;
+ }
+
+ kfree(bus);
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int create_device(struct controlvm_message *msg, char *buf)
+{
+ struct device_info *dev;
+ struct bus_info *bus;
+ struct guest_msgs cmd;
+ u32 bus_no, dev_no;
+ int result = CONTROLVM_RESP_SUCCESS;
+ u64 min_size = MIN_IO_CHANNEL_SIZE;
+ struct req_handler_info *req_handler;
+
+ bus_no = msg->cmd.create_device.bus_no;
+ dev_no = msg->cmd.create_device.dev_no;
+
+ POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
+ if (!dev) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+ }
+
+ dev->channel_uuid = msg->cmd.create_device.data_type_uuid;
+ dev->intr = msg->cmd.create_device.intr;
+ dev->channel_addr = msg->cmd.create_device.channel_addr;
+ dev->bus_no = bus_no;
+ dev->dev_no = dev_no;
+ sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
+ sprintf(dev->devid, "vbus%u:dev%u", (unsigned)bus_no, (unsigned)dev_no);
+ /* map the channel memory for the device. */
+ if (msg->hdr.flags.test_message) {
+ dev->chanptr = (void __iomem *)__va(dev->channel_addr);
+ } else {
+ req_handler = req_handler_find(dev->channel_uuid);
+ if (req_handler)
+ /* generic service handler registered for this
+ * channel
+ */
+ min_size = req_handler->min_channel_bytes;
+ if (min_size > msg->cmd.create_device.channel_bytes) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL;
+ goto cleanup;
+ }
+ dev->chanptr =
+ uislib_ioremap_cache(dev->channel_addr,
+ msg->cmd.create_device.channel_bytes);
+ if (!dev->chanptr) {
+ result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ goto cleanup;
+ }
+ }
+ dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid;
+ dev->channel_bytes = msg->cmd.create_device.channel_bytes;
+
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no != bus_no)
+ continue;
+ /* make sure the device number is valid */
+ if (dev_no >= bus->device_count) {
+ result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ read_unlock(&bus_list_lock);
+ goto cleanup;
+ }
+ /* make sure this device is not already set */
+ if (bus->device[dev_no]) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ read_unlock(&bus_list_lock);
+ goto cleanup;
+ }
+ read_unlock(&bus_list_lock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (msg->hdr.flags.server) {
+ bus->device[dev_no] = dev;
+ POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+ }
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
+ wait_for_valid_guid(&((struct channel_header __iomem *)
+ (dev->chanptr))->chtype);
+ if (!SPAR_VHBA_CHANNEL_OK_CLIENT(dev->chanptr)) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto cleanup;
+ }
+ cmd.msgtype = GUEST_ADD_VHBA;
+ cmd.add_vhba.chanptr = dev->chanptr;
+ cmd.add_vhba.bus_no = bus_no;
+ cmd.add_vhba.device_no = dev_no;
+ cmd.add_vhba.instance_uuid = dev->instance_uuid;
+ cmd.add_vhba.intr = dev->intr;
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
+ wait_for_valid_guid(&((struct channel_header __iomem *)
+ (dev->chanptr))->chtype);
+ if (!SPAR_VNIC_CHANNEL_OK_CLIENT(dev->chanptr)) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
+ dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
+ goto cleanup;
+ }
+ cmd.msgtype = GUEST_ADD_VNIC;
+ cmd.add_vnic.chanptr = dev->chanptr;
+ cmd.add_vnic.bus_no = bus_no;
+ cmd.add_vnic.device_no = dev_no;
+ cmd.add_vnic.instance_uuid = dev->instance_uuid;
+			cmd.add_vnic.intr = dev->intr;
+ } else {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ goto cleanup;
+ }
+
+ if (!virt_control_chan_func) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ goto cleanup;
+ }
+
+ if (!virt_control_chan_func(&cmd)) {
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_ERR);
+ result =
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ goto cleanup;
+ }
+
+ bus->device[dev_no] = dev;
+ POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
+ bus_no, POSTCODE_SEVERITY_INFO);
+ return CONTROLVM_RESP_SUCCESS;
+ }
+ read_unlock(&bus_list_lock);
+
+ POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ result = CONTROLVM_RESP_ERROR_BUS_INVALID;
+
+cleanup:
+ if (!msg->hdr.flags.test_message) {
+ uislib_iounmap(dev->chanptr);
+ dev->chanptr = NULL;
+ }
+
+ kfree(dev);
+ return result;
+}
+
+static int pause_device(struct controlvm_message *msg)
+{
+ u32 bus_no, dev_no;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+ int retval = CONTROLVM_RESP_SUCCESS;
+
+ bus_no = msg->cmd.device_change_state.bus_no;
+ dev_no = msg->cmd.device_change_state.dev_no;
+
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
+ /* make sure the device number is valid */
+ if (dev_no >= bus->device_count) {
+ retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ } else {
+ /* make sure this device exists */
+ dev = bus->device[dev_no];
+ if (!dev) {
+ retval =
+ CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ }
+ break;
+ }
+ }
+ if (!bus)
+ retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
+
+ read_unlock(&bus_list_lock);
+ if (retval == CONTROLVM_RESP_SUCCESS) {
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
+ cmd.msgtype = GUEST_PAUSE_VHBA;
+ cmd.pause_vhba.chanptr = dev->chanptr;
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
+ cmd.msgtype = GUEST_PAUSE_VNIC;
+ cmd.pause_vnic.chanptr = dev->chanptr;
+ } else {
+ return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+ if (!virt_control_chan_func)
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ if (!virt_control_chan_func(&cmd)) {
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ }
+ return retval;
+}
+
+static int resume_device(struct controlvm_message *msg)
+{
+ u32 bus_no, dev_no;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+ int retval = CONTROLVM_RESP_SUCCESS;
+
+ bus_no = msg->cmd.device_change_state.bus_no;
+ dev_no = msg->cmd.device_change_state.dev_no;
+
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
+ /* make sure the device number is valid */
+ if (dev_no >= bus->device_count) {
+ retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ } else {
+ /* make sure this device exists */
+ dev = bus->device[dev_no];
+ if (!dev) {
+ retval =
+ CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ }
+ break;
+ }
+ }
+
+ if (!bus)
+ retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
+
+ read_unlock(&bus_list_lock);
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (retval == CONTROLVM_RESP_SUCCESS) {
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
+ cmd.msgtype = GUEST_RESUME_VHBA;
+ cmd.resume_vhba.chanptr = dev->chanptr;
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
+ cmd.msgtype = GUEST_RESUME_VNIC;
+ cmd.resume_vnic.chanptr = dev->chanptr;
+ } else {
+ return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+ if (!virt_control_chan_func)
+ return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ if (!virt_control_chan_func(&cmd)) {
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+ }
+ return retval;
+}
+
+static int destroy_device(struct controlvm_message *msg, char *buf)
+{
+ u32 bus_no, dev_no;
+ struct bus_info *bus;
+ struct device_info *dev;
+ struct guest_msgs cmd;
+ int retval = CONTROLVM_RESP_SUCCESS;
+
+ bus_no = msg->cmd.destroy_device.bus_no;
+	dev_no = msg->cmd.destroy_device.dev_no;
+
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
+ /* make sure the device number is valid */
+ if (dev_no >= bus->device_count) {
+ retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+ } else {
+ /* make sure this device exists */
+ dev = bus->device[dev_no];
+ if (!dev) {
+ retval =
+ CONTROLVM_RESP_ERROR_ALREADY_DONE;
+ }
+ }
+ break;
+ }
+ }
+
+ if (!bus)
+ retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
+ read_unlock(&bus_list_lock);
+ if (retval == CONTROLVM_RESP_SUCCESS) {
+ /* the msg is bound for virtpci; send
+ * guest_msgs struct to callback
+ */
+ if (uuid_le_cmp(dev->channel_uuid,
+ spar_vhba_channel_protocol_uuid) == 0) {
+ cmd.msgtype = GUEST_DEL_VHBA;
+ cmd.del_vhba.chanptr = dev->chanptr;
+ } else if (uuid_le_cmp(dev->channel_uuid,
+ spar_vnic_channel_protocol_uuid) == 0) {
+ cmd.msgtype = GUEST_DEL_VNIC;
+ cmd.del_vnic.chanptr = dev->chanptr;
+ } else {
+ return
+ CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
+ }
+ if (!virt_control_chan_func) {
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
+ }
+ if (!virt_control_chan_func(&cmd)) {
+ return
+ CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
+ }
+		/* You must disable channel interrupts BEFORE you unmap
+		 * the channel, because if you unmap first, there may
+		 * still be some activity going on which accesses the
+		 * channel and you will get an "unable to handle kernel
+		 * paging request" oops.
+		 */
+ if (dev->polling)
+ uislib_disable_channel_interrupts(bus_no, dev_no);
+ /* unmap the channel memory for the device. */
+ if (!msg->hdr.flags.test_message)
+ uislib_iounmap(dev->chanptr);
+ kfree(dev);
+ bus->device[dev_no] = NULL;
+ }
+ return retval;
+}
+
+static int
+init_chipset(struct controlvm_message *msg, char *buf)
+{
+ POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ max_bus_count = msg->cmd.init_chipset.bus_count;
+ platform_no = msg->cmd.init_chipset.platform_number;
+ phys_data_chan = 0;
+
+ /* We need to make sure we have our functions registered
+ * before processing messages. If we are a test vehicle the
+ * test_message for init_chipset will be set. We can ignore the
+ * waits for the callbacks, since this will be manually entered
+ * from a user. If no test_message is set, we will wait for the
+ * functions.
+ */
+ if (!msg->hdr.flags.test_message)
+ WAIT_ON_CALLBACK(virt_control_chan_func);
+
+ chipset_inited = 1;
+ POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+
+ return CONTROLVM_RESP_SUCCESS;
+}
+
+static int delete_bus_glue(u32 bus_no)
+{
+ struct controlvm_message msg;
+
+ init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
+ msg.cmd.destroy_bus.bus_no = bus_no;
+ if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
+ return 0;
+ return 1;
+}
+
+static int delete_device_glue(u32 bus_no, u32 dev_no)
+{
+ struct controlvm_message msg;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
+ msg.cmd.destroy_device.bus_no = bus_no;
+ msg.cmd.destroy_device.dev_no = dev_no;
+ if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
+ return 0;
+ return 1;
+}
+
+int
+uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
+ u64 channel_addr, ulong n_channel_bytes)
+{
+ struct controlvm_message msg;
+
+ /* step 0: init the chipset */
+ POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+
+ if (!chipset_inited) {
+ /* step: initialize the chipset */
+ init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
+ /* this change is needed so that console will come up
+ * OK even when the bus 0 create comes in late. If the
+ * bus 0 create is the first create, then the add_vnic
+ * will work fine, but if the bus 0 create arrives
+ * after number 4, then the add_vnic will fail, and the
+ * ultraboot will fail.
+ */
+ msg.cmd.init_chipset.bus_count = 23;
+ msg.cmd.init_chipset.switch_count = 0;
+ if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
+ return 0;
+ POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, bus_no,
+ POSTCODE_SEVERITY_INFO);
+ }
+
+ /* step 1: create a bus */
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no,
+ POSTCODE_SEVERITY_WARNING);
+ init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
+ msg.cmd.create_bus.bus_no = bus_no;
+ msg.cmd.create_bus.dev_count = 23; /* devNo+1; */
+ msg.cmd.create_bus.channel_addr = channel_addr;
+ msg.cmd.create_bus.channel_bytes = n_channel_bytes;
+ if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
+
+int
+uislib_client_inject_del_bus(u32 bus_no)
+{
+ return delete_bus_glue(bus_no);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);
+
+int
+uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no)
+{
+ struct controlvm_message msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_standby;
+ rc = pause_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);
+
+int
+uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no)
+{
+ struct controlvm_message msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_running;
+ rc = resume_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
+
+int
+uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
+ u64 phys_chan_addr, u32 chan_bytes,
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr)
+{
+ struct controlvm_message msg;
+
+	/* The chipset has been init'ed and the bus has previously been
+	 * created - verify that it still exists.
+	 * Step 2: create the VHBA device on the bus.
+	 */
+ POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ if (is_test_addr)
+ /* signify that the physical channel address does NOT
+ * need to be ioremap()ed
+ */
+ msg.hdr.flags.test_message = 1;
+ msg.cmd.create_device.bus_no = bus_no;
+ msg.cmd.create_device.dev_no = dev_no;
+ msg.cmd.create_device.dev_inst_uuid = inst_uuid;
+ if (intr)
+ msg.cmd.create_device.intr = *intr;
+ else
+ memset(&msg.cmd.create_device.intr, 0,
+ sizeof(struct irq_info));
+ msg.cmd.create_device.channel_addr = phys_chan_addr;
+ if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes,
+ MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ msg.cmd.create_device.channel_bytes = chan_bytes;
+ msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);
+
+int
+uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no)
+{
+ return delete_device_glue(bus_no, dev_no);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);
+
+int
+uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
+ u64 phys_chan_addr, u32 chan_bytes,
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr)
+{
+ struct controlvm_message msg;
+
+	/* The chipset has been init'ed and the bus has previously been
+	 * created - verify that it still exists.
+	 * Step 2: create the VNIC device on the bus.
+	 */
+ POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
+ if (is_test_addr)
+ /* signify that the physical channel address does NOT
+ * need to be ioremap()ed
+ */
+ msg.hdr.flags.test_message = 1;
+ msg.cmd.create_device.bus_no = bus_no;
+ msg.cmd.create_device.dev_no = dev_no;
+ msg.cmd.create_device.dev_inst_uuid = inst_uuid;
+ if (intr)
+ msg.cmd.create_device.intr = *intr;
+ else
+ memset(&msg.cmd.create_device.intr, 0,
+ sizeof(struct irq_info));
+ msg.cmd.create_device.channel_addr = phys_chan_addr;
+ if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes,
+ MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+ msg.cmd.create_device.channel_bytes = chan_bytes;
+ msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid;
+ if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_ERR);
+ return 0;
+ }
+
+ POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, dev_no, bus_no,
+ POSTCODE_SEVERITY_INFO);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);
+
+int
+uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no)
+{
+ struct controlvm_message msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_standby;
+ rc = pause_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ return -1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);
+
+int
+uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no)
+{
+ struct controlvm_message msg;
+ int rc;
+
+ init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_running;
+ rc = resume_device(&msg);
+ if (rc != CONTROLVM_RESP_SUCCESS)
+ return -1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
+
+int
+uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no)
+{
+ return delete_device_glue(bus_no, dev_no);
+}
+EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);
+
+void *
+uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
+{
+	/* __GFP_NORETRY means "ok to fail", meaning kmem_cache_alloc()
+	 * can return NULL.  If you do NOT specify __GFP_NORETRY, Linux
+	 * will go to extreme measures to get memory for you (like,
+	 * invoke the oom killer), which will probably cripple the
+	 * system.
+	 */
+	return kmem_cache_alloc(cur_pool, GFP_ATOMIC | __GFP_NORETRY);
+}
+EXPORT_SYMBOL_GPL(uislib_cache_alloc);
+
+void
+uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln)
+{
+ if (!p)
+ return;
+ kmem_cache_free(cur_pool, p);
+}
+EXPORT_SYMBOL_GPL(uislib_cache_free);
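+
+/* A minimal usage sketch (illustrative only; "cmdrsp_pool" is a
+ * hypothetical cache created elsewhere with kmem_cache_create()).
+ * Because of __GFP_NORETRY above, callers must tolerate NULL:
+ *
+ *	struct uiscmdrsp *cmdrsp;
+ *
+ *	cmdrsp = uislib_cache_alloc(cmdrsp_pool, __MYFILE__, __LINE__);
+ *	if (!cmdrsp)
+ *		return;			(handle allocation failure)
+ *	(... use cmdrsp ...)
+ *	uislib_cache_free(cmdrsp_pool, cmdrsp, __MYFILE__, __LINE__);
+ */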
+
+/*****************************************************/
+/* proc filesystem callback functions */
+/*****************************************************/
+
+#define PLINE(...) uisutil_add_proc_line_ex(&tot, buff, \
+ buff_len, __VA_ARGS__)
+
+static int
+info_debugfs_read_helper(char **buff, int *buff_len)
+{
+ int i, tot = 0;
+ struct bus_info *bus;
+
+ if (PLINE("\nBuses:\n") < 0)
+ goto err_done;
+
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n",
+ bus, bus->bus_no, bus->device_count) < 0)
+ goto err_done_unlock;
+
+ if (PLINE(" Devices:\n") < 0)
+ goto err_done_unlock;
+
+ for (i = 0; i < bus->device_count; i++) {
+ if (bus->device[i]) {
+ if (PLINE(" busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
+ bus->bus_no, i, bus->device[i],
+ bus->device[i]->chanptr,
+ bus->device[i]->swtch) < 0)
+ goto err_done_unlock;
+
+ if (PLINE(" first_busy_cnt=%llu, moved_to_tail_cnt=%llu, last_on_list_cnt=%llu\n",
+ bus->device[i]->first_busy_cnt,
+ bus->device[i]->moved_to_tail_cnt,
+ bus->device[i]->last_on_list_cnt) < 0)
+ goto err_done_unlock;
+ }
+ }
+ }
+ read_unlock(&bus_list_lock);
+
+ if (PLINE("UisUtils_Registered_Services: %d\n",
+ atomic_read(&uisutils_registered_services)) < 0)
+ goto err_done;
+ if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
+ cycles_before_wait, wait_cycles) < 0)
+ goto err_done;
+ if (PLINE("tot_wakeup_cnt %llu:tot_wait_cnt %llu:tot_schedule_cnt %llu\n",
+ tot_wakeup_cnt, tot_wait_cnt, tot_schedule_cnt) < 0)
+ goto err_done;
+ if (PLINE("en_smart_wakeup %d\n", en_smart_wakeup) < 0)
+ goto err_done;
+ if (PLINE("tot_moved_to_tail_cnt %llu\n", tot_moved_to_tail_cnt) < 0)
+ goto err_done;
+
+ return tot;
+
+err_done_unlock:
+ read_unlock(&bus_list_lock);
+err_done:
+ return -1;
+}
+
+static ssize_t info_debugfs_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ char *temp;
+ int total_bytes = 0;
+ int remaining_bytes = PROC_READ_BUFFER_SIZE;
+
+ if (!debug_buf) {
+ debug_buf = vmalloc(PROC_READ_BUFFER_SIZE);
+
+ if (!debug_buf)
+ return -ENOMEM;
+ }
+
+ temp = debug_buf;
+
+	if ((*offset == 0) || (!debug_buf_valid)) {
+		/* if the read fails, -1 is returned; don't hand a
+		 * negative length to simple_read_from_buffer()
+		 */
+		total_bytes = info_debugfs_read_helper(&temp,
+						       &remaining_bytes);
+		if (total_bytes < 0)
+			return -EIO;
+		debug_buf_valid = 1;
+	} else {
+		total_bytes = strlen(debug_buf);
+	}
+
+ return simple_read_from_buffer(buf, len, offset,
+ debug_buf, total_bytes);
+}
+
+static struct device_info *find_dev(u32 bus_no, u32 dev_no)
+{
+ struct bus_info *bus;
+ struct device_info *dev = NULL;
+
+ read_lock(&bus_list_lock);
+ for (bus = bus_list; bus; bus = bus->next) {
+ if (bus->bus_no == bus_no) {
+ /* make sure the device number is valid */
+ if (dev_no >= bus->device_count)
+ break;
+ dev = bus->device[dev_no];
+ break;
+ }
+ }
+ read_unlock(&bus_list_lock);
+ return dev;
+}
+
+/* This thread calls the "interrupt" function for each device that has
+ * enabled polling via uislib_enable_channel_interrupts(). The "interrupt"
+ * function typically reads and processes the device's channel input
+ * queue. This thread repeatedly does this, until the thread is told to stop
+ * (via uisthread_stop()). Sleeping rules:
+ * - If we have called the "interrupt" function for all devices, and all of
+ * them have reported "nothing processed" (returned 0), then we will go to
+ * sleep for a maximum of POLLJIFFIES_NORMAL jiffies.
+ * - If anyone calls uislib_force_channel_interrupt(), the above jiffy
+ * sleep will be interrupted, and we will resume calling the "interrupt"
+ * function for all devices.
+ * - The list of devices is dynamically re-ordered in order to
+ * attempt to preserve fairness. Whenever we spin thru the list of
+ * devices and call the dev->interrupt() function, if we find
+ * devices which report that there is still more work to do, the
+ * first such device we find is moved to the end of the device
+ * list. This ensures that extremely busy devices don't starve out
+ * less-busy ones.
+ */
+static int process_incoming(void *v)
+{
+ unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles;
+ struct list_head *new_tail = NULL;
+ int i;
+
+ UIS_DAEMONIZE("dev_incoming");
+ for (i = 0; i < 16; i++) {
+ old_cycles = get_cycles();
+ wait_event_timeout(poll_dev_wake_q,
+ 0, POLLJIFFIES_NORMAL);
+ cur_cycles = get_cycles();
+		/* remember the worst-case (largest) wait duration seen;
+		 * wait_cycles starts at 0, so no special first-pass case
+		 * is needed
+		 */
+		if (wait_cycles < (cur_cycles - old_cycles))
+			wait_cycles = (cur_cycles - old_cycles);
+ }
+ cycles_before_wait = wait_cycles;
+ idle_cycles = 0;
+ poll_dev_start = 0;
+ while (1) {
+ struct list_head *lelt, *tmp;
+ struct device_info *dev = NULL;
+
+ /* poll each channel for input */
+ down(&poll_dev_lock);
+ new_tail = NULL;
+ list_for_each_safe(lelt, tmp, &poll_dev_chan) {
+ int rc = 0;
+
+ dev = list_entry(lelt, struct device_info,
+ list_polling_device_channels);
+			down(&dev->interrupt_callback_lock);
+			if (!dev->interrupt) {
+				/* no handler registered; drop the lock
+				 * before moving to the next device
+				 */
+				up(&dev->interrupt_callback_lock);
+				continue;
+			}
+			rc = dev->interrupt(dev->interrupt_context);
+			up(&dev->interrupt_callback_lock);
+			if (rc) {
+				/* dev->interrupt returned, but there
+				 * is still more work to do.
+				 * Reschedule work to occur as soon as
+				 * possible.
+				 */
+				idle_cycles = 0;
+				if (!new_tail) {
+					dev->first_busy_cnt++;
+					if (!list_is_last(lelt,
+							  &poll_dev_chan)) {
+						new_tail = lelt;
+						dev->moved_to_tail_cnt++;
+					} else {
+						dev->last_on_list_cnt++;
+					}
+				}
+			}
+ if (kthread_should_stop())
+ break;
+ }
+ if (new_tail) {
+ tot_moved_to_tail_cnt++;
+ list_move_tail(new_tail, &poll_dev_chan);
+ }
+ up(&poll_dev_lock);
+ cur_cycles = get_cycles();
+ delta_cycles = cur_cycles - old_cycles;
+ old_cycles = cur_cycles;
+
+ /* At this point, we have scanned thru all of the
+ * channels, and at least one of the following is true:
+ * - there is no input waiting on any of the channels
+ * - we have received a signal to stop this thread
+ */
+ if (kthread_should_stop())
+ break;
+ if (en_smart_wakeup == 0xFF)
+ break;
+		/* Wait for POLLJIFFIES_NORMAL jiffies, or until someone
+		 * wakes up poll_dev_wake_q, whichever comes first; only
+		 * do a wait once we have been idle for at least
+		 * cycles_before_wait cycles.
+		 */
+ if (idle_cycles > cycles_before_wait) {
+ poll_dev_start = 0;
+ tot_wait_cnt++;
+ wait_event_timeout(poll_dev_wake_q,
+ poll_dev_start,
+ POLLJIFFIES_NORMAL);
+ poll_dev_start = 1;
+ } else {
+ tot_schedule_cnt++;
+ schedule();
+ idle_cycles = idle_cycles + delta_cycles;
+ }
+ }
+ complete_and_exit(&incoming_ti.has_stopped, 0);
+}
+
+static BOOL
+initialize_incoming_thread(void)
+{
+ if (incoming_started)
+ return TRUE;
+ if (!uisthread_start(&incoming_ti,
+ &process_incoming, NULL, "dev_incoming")) {
+ return FALSE;
+ }
+ incoming_started = TRUE;
+ return TRUE;
+}
+
+/* Add a new device/channel to the list being processed by
+ * process_incoming().
+ * <interrupt> - indicates the function to call periodically.
+ * <interrupt_context> - indicates the data to pass to the <interrupt>
+ * function.
+ */
+void
+uislib_enable_channel_interrupts(u32 bus_no, u32 dev_no,
+ int (*interrupt)(void *),
+ void *interrupt_context)
+{
+ struct device_info *dev;
+
+ dev = find_dev(bus_no, dev_no);
+ if (!dev)
+ return;
+
+ down(&poll_dev_lock);
+ initialize_incoming_thread();
+ dev->interrupt = interrupt;
+ dev->interrupt_context = interrupt_context;
+ dev->polling = TRUE;
+ list_add_tail(&dev->list_polling_device_channels,
+ &poll_dev_chan);
+ up(&poll_dev_lock);
+}
+EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts);
+
+/* Remove a device/channel from the list being processed by
+ * process_incoming().
+ */
+void
+uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no)
+{
+ struct device_info *dev;
+
+ dev = find_dev(bus_no, dev_no);
+ if (!dev)
+ return;
+ down(&poll_dev_lock);
+ list_del(&dev->list_polling_device_channels);
+ dev->polling = FALSE;
+ dev->interrupt = NULL;
+ up(&poll_dev_lock);
+}
+EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts);
+
+static void
+do_wakeup_polling_device_channels(struct work_struct *dummy)
+{
+ if (!poll_dev_start) {
+ poll_dev_start = 1;
+ wake_up(&poll_dev_wake_q);
+ }
+}
+
+static DECLARE_WORK(work_wakeup_polling_device_channels,
+ do_wakeup_polling_device_channels);
+
+/* Call this function when you want to send a hint to process_incoming() that
+ * your device might have more requests.
+ */
+void
+uislib_force_channel_interrupt(u32 bus_no, u32 dev_no)
+{
+ if (en_smart_wakeup == 0)
+ return;
+ if (poll_dev_start)
+ return;
+ /* The point of using schedule_work() instead of just doing
+ * the work inline is to force a slight delay before waking up
+ * the process_incoming() thread.
+ */
+ tot_wakeup_cnt++;
+ schedule_work(&work_wakeup_polling_device_channels);
+}
+EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);
+
+/*****************************************************/
+/* Module Init & Exit functions */
+/*****************************************************/
+
+static int __init
+uislib_mod_init(void)
+{
+ if (!unisys_spar_platform)
+ return -ENODEV;
+
+ /* initialize global pointers to NULL */
+ bus_list = NULL;
+ bus_list_count = 0;
+ max_bus_count = 0;
+ rwlock_init(&bus_list_lock);
+ virt_control_chan_func = NULL;
+
+ /* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
+ * then map this physical address to a virtual address. */
+ POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+
+ dir_debugfs = debugfs_create_dir(DIR_DEBUGFS_ENTRY, NULL);
+ if (dir_debugfs) {
+ info_debugfs_entry = debugfs_create_file(
+ INFO_DEBUGFS_ENTRY_FN, 0444, dir_debugfs, NULL,
+ &debugfs_info_fops);
+
+ platformnumber_debugfs_read = debugfs_create_u32(
+ PLATFORMNUMBER_DEBUGFS_ENTRY_FN, 0444, dir_debugfs,
+ &platform_no);
+
+ cycles_before_wait_debugfs_read = debugfs_create_u64(
+ CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
+ &cycles_before_wait);
+
+ smart_wakeup_debugfs_entry = debugfs_create_bool(
+ SMART_WAKEUP_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
+ &en_smart_wakeup);
+ }
+
+ POSTCODE_LINUX_3(DRIVER_EXIT_PC, 0, POSTCODE_SEVERITY_INFO);
+ return 0;
+}
+
+static void __exit
+uislib_mod_exit(void)
+{
+ if (debug_buf) {
+ vfree(debug_buf);
+ debug_buf = NULL;
+ }
+
+ debugfs_remove(info_debugfs_entry);
+ debugfs_remove(smart_wakeup_debugfs_entry);
+ debugfs_remove(cycles_before_wait_debugfs_read);
+ debugfs_remove(platformnumber_debugfs_read);
+ debugfs_remove(dir_debugfs);
+}
+
+module_init(uislib_mod_init);
+module_exit(uislib_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Usha Srinivasan");
+MODULE_ALIAS("uislib");
+ /* this is extracted during depmod and kept in modules.dep */
diff --git a/kernel/drivers/staging/unisys/uislib/uisqueue.c b/kernel/drivers/staging/unisys/uislib/uisqueue.c
new file mode 100644
index 000000000..d46dd7428
--- /dev/null
+++ b/kernel/drivers/staging/unisys/uislib/uisqueue.c
@@ -0,0 +1,322 @@
+/* uisqueue.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "uisutils.h"
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages */
+#define CURRENT_FILE_PC UISLIB_PC_uisqueue_c
+#define __MYFILE__ "uisqueue.c"
+
+#define CHECK_CACHE_ALIGN 0
+
+/*****************************************************/
+/* Exported functions */
+/*****************************************************/
+
+/*
+ * Routine Description:
+ * Tries to insert the prebuilt signal pointed to by sig into the nth
+ * queue of the channel pointed to by ch.
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ * sig: (IN) pointer to the signal
+ *
+ * Assumptions:
+ * - ch, queue and sig are valid.
+ * - If insertion fails due to a full queue, the caller will determine the
+ * retry policy (e.g. wait & try again, report an error, etc.).
+ *
+ * Return value:
+ * 1 if the insertion succeeds, 0 if the queue was full.
+ */
+unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
+ void *sig)
+{
+ void __iomem *psignal;
+ unsigned int head, tail, nof;
+
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)
+ ((char __iomem *)ch + readq(&ch->ch_space_offset))
+ + queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
+
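+	/* One slot is always left unused so that a full queue (head one
+	 * slot behind tail, modulo max_slots) can be told apart from an
+	 * empty one (head == tail). E.g. with max_slots == 4, head == 3,
+	 * tail == 0: (3 + 1) % 4 == 0 == tail, so only 3 signals fit.
+	 */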
+ /* queue is full if (head + 1) % n equals tail */
+ if (((head + 1) % readl(&pqhdr->max_slots)) == tail) {
+ nof = readq(&pqhdr->num_overflows) + 1;
+ writeq(nof, &pqhdr->num_overflows);
+ return 0;
+ }
+
+ /* increment the head index */
+ head = (head + 1) % readl(&pqhdr->max_slots);
+
+ /* copy signal to the head location from the area pointed to
+ * by pSignal
+ */
+ psignal = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (head * readl(&pqhdr->signal_size));
+ memcpy_toio(psignal, sig, readl(&pqhdr->signal_size));
+
+ mb(); /* channel synch */
+ writel(head, &pqhdr->head);
+
+ writeq(readq(&pqhdr->num_sent) + 1, &pqhdr->num_sent);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(spar_signal_insert);
+
+/*
+ * Routine Description:
+ * Removes one signal from channel ch's nth queue at the
+ * time of the call and copies it into the memory pointed to by
+ * sig.
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ * sig: (OUT) pointer to where the signal is to be copied
+ *
+ * Assumptions:
+ * - ch and queue are valid.
+ * - sig points to a memory area large enough to hold the queue's
+ * signal_size
+ *
+ * Return value:
+ * 1 if the removal succeeds, 0 if the queue was empty.
+ */
+unsigned char
+spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig)
+{
+ void __iomem *psource;
+ unsigned int head, tail;
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
+
+ /* capture current head and tail */
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail) {
+ writeq(readq(&pqhdr->num_empty) + 1, &pqhdr->num_empty);
+ return 0;
+ }
+
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % readl(&pqhdr->max_slots);
+
+ /* copy signal from tail location to the area pointed to by pSignal */
+ psource = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (tail * readl(&pqhdr->signal_size));
+ memcpy_fromio(sig, psource, readl(&pqhdr->signal_size));
+
+ mb(); /* channel synch */
+ writel(tail, &pqhdr->tail);
+
+ writeq(readq(&pqhdr->num_received) + 1,
+ &pqhdr->num_received);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(spar_signal_remove);
+
+/*
+ * Routine Description:
+ * Removes all signals present in channel ch's nth queue at the
+ * time of the call and copies them into the memory pointed to by
+ * sig. Returns the # of signals copied as the value of the routine.
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ * sig: (OUT) pointer to where the signals are to be copied
+ *
+ * Assumptions:
+ * - ch and queue are valid.
+ * - sig points to a memory area large enough to hold the queue's
+ * max_slots # of signals, each of which is the queue's signal_size.
+ *
+ * Return value:
+ * # of signals copied.
+ */
+unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
+ void *sig)
+{
+ void *psource;
+ unsigned int head, tail, count = 0;
+ struct signal_queue_header *pqhdr =
+ (struct signal_queue_header *)((char *)ch +
+ ch->ch_space_offset) + queue;
+
+ /* capture current head and tail */
+ head = pqhdr->head;
+ tail = pqhdr->tail;
+
+ /* queue is empty if the head index equals the tail index */
+ if (head == tail)
+ return 0;
+
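+	/* Note that the tail is published after every copied signal
+	 * (rather than once at the end) so the producer can begin
+	 * reusing drained slots while the rest are still being copied.
+	 */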
+ while (head != tail) {
+ /* advance past the 'empty' front slot */
+ tail = (tail + 1) % pqhdr->max_slots;
+
+ /* copy signal from tail location to the area pointed
+ * to by pSignal
+ */
+ psource =
+ (char *)pqhdr + pqhdr->sig_base_offset +
+ (tail * pqhdr->signal_size);
+ memcpy((char *)sig + (pqhdr->signal_size * count),
+ psource, pqhdr->signal_size);
+
+ mb(); /* channel synch */
+ pqhdr->tail = tail;
+
+ count++;
+ pqhdr->num_received++;
+ }
+
+ return count;
+}
+
+/*
+ * Routine Description:
+ * Determine whether a signal queue is empty.
+ *
+ * Parameters:
+ * ch: (IN) points to the IO Channel
+ * queue: (IN) nth queue of the IO Channel
+ *
+ * Return value:
+ * 1 if the signal queue is empty, 0 otherwise.
+ */
+unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
+ u32 queue)
+{
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
+ return readl(&pqhdr->head) == readl(&pqhdr->tail);
+}
+EXPORT_SYMBOL_GPL(spar_signalqueue_empty);
+
+unsigned long long
+uisqueue_interlocked_or(unsigned long long __iomem *tgt,
+ unsigned long long set)
+{
+ unsigned long long i;
+ unsigned long long j;
+
+ j = readq(tgt);
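+	/* cmpxchg() returns the value that was in *tgt before the
+	 * exchange; retry until it matches what we last read, i.e. no
+	 * other writer raced in between our read and our update.
+	 */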
+ do {
+ i = j;
+ j = cmpxchg((__force unsigned long long *)tgt, i, i | set);
+
+ } while (i != j);
+
+ return j;
+}
+EXPORT_SYMBOL_GPL(uisqueue_interlocked_or);
+
+unsigned long long
+uisqueue_interlocked_and(unsigned long long __iomem *tgt,
+ unsigned long long set)
+{
+ unsigned long long i;
+ unsigned long long j;
+
+ j = readq(tgt);
+ do {
+ i = j;
+ j = cmpxchg((__force unsigned long long *)tgt, i, i & set);
+
+ } while (i != j);
+
+ return j;
+}
+EXPORT_SYMBOL_GPL(uisqueue_interlocked_and);
+
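+/* Insert one signal while holding the caller's spinlock, bracketed by
+ * the channel client acquire/release handshake; returns 1 on success,
+ * 0 if the channel could not be acquired or the queue was full.
+ */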
+static u8
+do_locked_client_insert(struct uisqueue_info *queueinfo,
+ unsigned int whichqueue,
+ void *signal,
+ spinlock_t *lock,
+ u8 *channel_id)
+{
+ unsigned long flags;
+ u8 rc = 0;
+
+ spin_lock_irqsave(lock, flags);
+ if (!spar_channel_client_acquire_os(queueinfo->chan, channel_id))
+ goto unlock;
+ if (spar_signal_insert(queueinfo->chan, whichqueue, signal)) {
+ queueinfo->packets_sent++;
+ rc = 1;
+ }
+ spar_channel_client_release_os(queueinfo->chan, channel_id);
+unlock:
+	spin_unlock_irqrestore(lock, flags);
+ return rc;
+}
+
+int
+uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo,
+ struct uiscmdrsp *cmdrsp,
+ unsigned int whichqueue,
+ void *insertlock,
+ unsigned char issue_irq_if_empty,
+ u64 irq_handle,
+ char oktowait, u8 *channel_id)
+{
+ while (!do_locked_client_insert(queueinfo, whichqueue, cmdrsp,
+ (spinlock_t *)insertlock,
+ channel_id)) {
+ if (oktowait != OK_TO_WAIT)
+ return 0; /* failed to queue */
+
+ /* try again */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uisqueue_put_cmdrsp_with_lock_client);
+
+/* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue;
+ * returns 0 if the queue is empty, 1 on success
+ */
+int
+uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo,
+ void *cmdrsp, unsigned int whichqueue)
+{
+ if (!spar_signal_remove(queueinfo->chan, whichqueue, cmdrsp))
+ return 0;
+
+ queueinfo->packets_received++;
+
+ return 1; /* Success */
+}
+EXPORT_SYMBOL_GPL(uisqueue_get_cmdrsp);
diff --git a/kernel/drivers/staging/unisys/uislib/uisthread.c b/kernel/drivers/staging/unisys/uislib/uisthread.c
new file mode 100644
index 000000000..d3c973b61
--- /dev/null
+++ b/kernel/drivers/staging/unisys/uislib/uisthread.c
@@ -0,0 +1,69 @@
+/* uisthread.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+/* @ALL_INSPECTED */
+#include <asm/processor.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include "uisutils.h"
+#include "uisthread.h"
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uisthread_c
+#define __MYFILE__ "uisthread.c"
+
+/*****************************************************/
+/* Exported functions */
+/*****************************************************/
+
+/* returns 0 for failure, 1 for success */
+int
+uisthread_start(struct uisthread_info *thrinfo,
+ int (*threadfn)(void *), void *thrcontext, char *name)
+{
+ /* used to stop the thread */
+ init_completion(&thrinfo->has_stopped);
+ thrinfo->task = kthread_run(threadfn, thrcontext, name);
+ if (IS_ERR(thrinfo->task)) {
+ thrinfo->id = 0;
+ return 0; /* failure */
+ }
+ thrinfo->id = thrinfo->task->pid;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uisthread_start);
+
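+/* Ask a thread started via uisthread_start() to stop, then wait up to
+ * a minute for it to signal has_stopped (the thread function is
+ * expected to exit via complete_and_exit(), as process_incoming() in
+ * uislib.c does); thrinfo->id is cleared only on a clean stop.
+ */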
+void
+uisthread_stop(struct uisthread_info *thrinfo)
+{
+	if (thrinfo->id == 0)
+		return;	/* thread not running */
+
+	kthread_stop(thrinfo->task);
+
+	/* give up if the thread has NOT died in 1 minute */
+	if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
+		thrinfo->id = 0;
+}
+EXPORT_SYMBOL_GPL(uisthread_stop);
diff --git a/kernel/drivers/staging/unisys/uislib/uisutils.c b/kernel/drivers/staging/unisys/uislib/uisutils.c
new file mode 100644
index 000000000..26ab76526
--- /dev/null
+++ b/kernel/drivers/staging/unisys/uislib/uisutils.c
@@ -0,0 +1,137 @@
+/* uisutils.c
+ *
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include "uisutils.h"
+#include "version.h"
+#include "vbushelper.h"
+#include <linux/skbuff.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/highmem.h>
+#endif
+
+/* this is shorter than using __FILE__ (full path name) in
+ * debug/info/error messages
+ */
+#define CURRENT_FILE_PC UISLIB_PC_uisutils_c
+#define __MYFILE__ "uisutils.c"
+
+/* exports */
+/* number of registrations via uisctrl_register_req_handler() or
+ * uisctrl_register_req_handler_ex()
+ */
+atomic_t uisutils_registered_services = ATOMIC_INIT(0);
+
+/*****************************************************/
+/* Utility functions */
+/*****************************************************/
+
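+/* Format one line into *buffer, advancing *buffer and *total and
+ * shrinking *buffer_remaining accordingly. Returns the number of
+ * characters written, or -1 when the line does not fit (in which case
+ * the remaining space is consumed so that subsequent calls fail fast).
+ * Callers typically wrap this in a macro, e.g. PLINE() in uislib.c.
+ */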
+int
+uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
+ char *format, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, format);
+ len = vsnprintf(*buffer, *buffer_remaining, format, args);
+ va_end(args);
+ if (len >= *buffer_remaining) {
+ *buffer += *buffer_remaining;
+ *total += *buffer_remaining;
+ *buffer_remaining = 0;
+ return -1;
+ }
+ *buffer_remaining -= len;
+ *buffer += len;
+ *total += len;
+ return len;
+}
+EXPORT_SYMBOL_GPL(uisutil_add_proc_line_ex);
+
+int
+uisctrl_register_req_handler(int type, void *fptr,
+ struct ultra_vbus_deviceinfo *chipset_driver_info)
+{
+ switch (type) {
+ case 2:
+ if (fptr) {
+ if (!virt_control_chan_func)
+ atomic_inc(&uisutils_registered_services);
+ virt_control_chan_func = fptr;
+ } else {
+ if (virt_control_chan_func)
+ atomic_dec(&uisutils_registered_services);
+ virt_control_chan_func = NULL;
+ }
+ break;
+
+ default:
+ return 0;
+ }
+ if (chipset_driver_info)
+ bus_device_info_init(chipset_driver_info, "chipset", "uislib",
+ VERSION, NULL);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(uisctrl_register_req_handler);
+
+static LIST_HEAD(req_handler_info_list); /* list of struct req_handler_info */
+static DEFINE_SPINLOCK(req_handler_info_list_lock);
+
+struct req_handler_info *
+req_handler_find(uuid_le switch_uuid)
+{
+ struct list_head *lelt, *tmp;
+ struct req_handler_info *entry = NULL;
+
+ spin_lock(&req_handler_info_list_lock);
+ list_for_each_safe(lelt, tmp, &req_handler_info_list) {
+ entry = list_entry(lelt, struct req_handler_info, list_link);
+ if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) {
+ spin_unlock(&req_handler_info_list_lock);
+ return entry;
+ }
+ }
+ spin_unlock(&req_handler_info_list_lock);
+ return NULL;
+}