From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001 From: Yunhong Jiang Date: Tue, 4 Aug 2015 12:17:53 -0700 Subject: Add the rt linux 4.1.3-rt3 as base Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git linux-4.1.y-rt and the base is: commit 0917f823c59692d751951bf5ea699a2d1e2f26a2 Author: Sebastian Andrzej Siewior Date: Sat Jul 25 12:13:34 2015 +0200 Prepare v4.1.3-rt3 Signed-off-by: Sebastian Andrzej Siewior We lose all the git history this way and it's not good. We should apply another opnfv project repo in future. Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423 Signed-off-by: Yunhong Jiang --- .../Documentation/ABI/sysfs-platform-visorchipset | 101 + .../staging/unisys/Documentation/overview.txt | 174 ++ .../staging/unisys/Documentation/proc-entries.txt | 93 + kernel/drivers/staging/unisys/Kconfig | 19 + kernel/drivers/staging/unisys/MAINTAINERS | 6 + kernel/drivers/staging/unisys/Makefile | 9 + kernel/drivers/staging/unisys/TODO | 21 + .../unisys/common-spar/include/channels/channel.h | 590 +++++ .../common-spar/include/channels/channel_guid.h | 61 + .../include/channels/controlframework.h | 62 + .../include/channels/controlvmchannel.h | 511 +++++ .../common-spar/include/channels/diagchannel.h | 427 ++++ .../common-spar/include/channels/iochannel.h | 784 +++++++ .../common-spar/include/channels/vbuschannel.h | 94 + .../include/controlvmcompletionstatus.h | 94 + .../include/diagnostics/appos_subsystems.h | 310 +++ .../unisys/common-spar/include/iovmcall_gnuc.h | 49 + .../unisys/common-spar/include/vbusdeviceinfo.h | 213 ++ .../staging/unisys/common-spar/include/version.h | 45 + .../unisys/common-spar/include/vmcallinterface.h | 163 ++ .../staging/unisys/include/guestlinuxdebug.h | 180 ++ .../drivers/staging/unisys/include/periodic_work.h | 38 + .../staging/unisys/include/procobjecttree.h | 47 + kernel/drivers/staging/unisys/include/sparstop.h | 30 + kernel/drivers/staging/unisys/include/timskmod.h | 153 ++ kernel/drivers/staging/unisys/include/uisqueue.h | 396 ++++ kernel/drivers/staging/unisys/include/uisthread.h | 42 + kernel/drivers/staging/unisys/include/uisutils.h | 299 +++ kernel/drivers/staging/unisys/include/vbushelper.h | 47 + kernel/drivers/staging/unisys/uislib/Kconfig | 10 + kernel/drivers/staging/unisys/uislib/Makefile | 12 + kernel/drivers/staging/unisys/uislib/uislib.c | 1372 ++++++++++++ kernel/drivers/staging/unisys/uislib/uisqueue.c | 322 +++ kernel/drivers/staging/unisys/uislib/uisthread.c | 69 + kernel/drivers/staging/unisys/uislib/uisutils.c | 137 ++ kernel/drivers/staging/unisys/virthba/Kconfig | 13 + kernel/drivers/staging/unisys/virthba/Makefile | 12 + kernel/drivers/staging/unisys/virthba/virthba.c | 1572 +++++++++++++ kernel/drivers/staging/unisys/virthba/virthba.h | 27 + kernel/drivers/staging/unisys/virtpci/Kconfig | 10 + kernel/drivers/staging/unisys/virtpci/Makefile | 10 + kernel/drivers/staging/unisys/virtpci/virtpci.c | 1394 ++++++++++++ kernel/drivers/staging/unisys/virtpci/virtpci.h | 103 + kernel/drivers/staging/unisys/visorchannel/Kconfig | 10 + .../drivers/staging/unisys/visorchannel/Makefile | 12 + .../drivers/staging/unisys/visorchannel/globals.h | 27 + .../staging/unisys/visorchannel/visorchannel.h | 76 + .../unisys/visorchannel/visorchannel_funcs.c | 665 ++++++ .../unisys/visorchannel/visorchannel_main.c | 50 + kernel/drivers/staging/unisys/visorchipset/Kconfig | 11 + .../drivers/staging/unisys/visorchipset/Makefile | 15 + 
kernel/drivers/staging/unisys/visorchipset/file.c | 160 ++ kernel/drivers/staging/unisys/visorchipset/file.h | 27 + .../drivers/staging/unisys/visorchipset/globals.h | 42 + .../drivers/staging/unisys/visorchipset/parser.c | 430 ++++ .../drivers/staging/unisys/visorchipset/parser.h | 46 + .../staging/unisys/visorchipset/visorchipset.h | 236 ++ .../unisys/visorchipset/visorchipset_main.c | 2335 ++++++++++++++++++++ .../unisys/visorchipset/visorchipset_umode.h | 35 + kernel/drivers/staging/unisys/visorutil/Kconfig | 9 + kernel/drivers/staging/unisys/visorutil/Makefile | 9 + .../drivers/staging/unisys/visorutil/charqueue.c | 127 ++ .../drivers/staging/unisys/visorutil/charqueue.h | 37 + .../drivers/staging/unisys/visorutil/memregion.h | 43 + .../staging/unisys/visorutil/memregion_direct.c | 207 ++ .../staging/unisys/visorutil/periodic_work.c | 204 ++ .../staging/unisys/visorutil/visorkmodutils.c | 71 + 67 files changed, 15005 insertions(+) create mode 100644 kernel/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset create mode 100644 kernel/drivers/staging/unisys/Documentation/overview.txt create mode 100644 kernel/drivers/staging/unisys/Documentation/proc-entries.txt create mode 100644 kernel/drivers/staging/unisys/Kconfig create mode 100644 kernel/drivers/staging/unisys/MAINTAINERS create mode 100644 kernel/drivers/staging/unisys/Makefile create mode 100644 kernel/drivers/staging/unisys/TODO create mode 100644 kernel/drivers/staging/unisys/common-spar/include/channels/channel.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/channels/channel_guid.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/channels/controlframework.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/channels/diagchannel.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/channels/iochannel.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/version.h create mode 100644 kernel/drivers/staging/unisys/common-spar/include/vmcallinterface.h create mode 100644 kernel/drivers/staging/unisys/include/guestlinuxdebug.h create mode 100644 kernel/drivers/staging/unisys/include/periodic_work.h create mode 100644 kernel/drivers/staging/unisys/include/procobjecttree.h create mode 100644 kernel/drivers/staging/unisys/include/sparstop.h create mode 100644 kernel/drivers/staging/unisys/include/timskmod.h create mode 100644 kernel/drivers/staging/unisys/include/uisqueue.h create mode 100644 kernel/drivers/staging/unisys/include/uisthread.h create mode 100644 kernel/drivers/staging/unisys/include/uisutils.h create mode 100644 kernel/drivers/staging/unisys/include/vbushelper.h create mode 100644 kernel/drivers/staging/unisys/uislib/Kconfig create mode 100644 kernel/drivers/staging/unisys/uislib/Makefile create mode 100644 kernel/drivers/staging/unisys/uislib/uislib.c create mode 100644 kernel/drivers/staging/unisys/uislib/uisqueue.c create mode 100644 
kernel/drivers/staging/unisys/uislib/uisthread.c create mode 100644 kernel/drivers/staging/unisys/uislib/uisutils.c create mode 100644 kernel/drivers/staging/unisys/virthba/Kconfig create mode 100644 kernel/drivers/staging/unisys/virthba/Makefile create mode 100644 kernel/drivers/staging/unisys/virthba/virthba.c create mode 100644 kernel/drivers/staging/unisys/virthba/virthba.h create mode 100644 kernel/drivers/staging/unisys/virtpci/Kconfig create mode 100644 kernel/drivers/staging/unisys/virtpci/Makefile create mode 100644 kernel/drivers/staging/unisys/virtpci/virtpci.c create mode 100644 kernel/drivers/staging/unisys/virtpci/virtpci.h create mode 100644 kernel/drivers/staging/unisys/visorchannel/Kconfig create mode 100644 kernel/drivers/staging/unisys/visorchannel/Makefile create mode 100644 kernel/drivers/staging/unisys/visorchannel/globals.h create mode 100644 kernel/drivers/staging/unisys/visorchannel/visorchannel.h create mode 100644 kernel/drivers/staging/unisys/visorchannel/visorchannel_funcs.c create mode 100644 kernel/drivers/staging/unisys/visorchannel/visorchannel_main.c create mode 100644 kernel/drivers/staging/unisys/visorchipset/Kconfig create mode 100644 kernel/drivers/staging/unisys/visorchipset/Makefile create mode 100644 kernel/drivers/staging/unisys/visorchipset/file.c create mode 100644 kernel/drivers/staging/unisys/visorchipset/file.h create mode 100644 kernel/drivers/staging/unisys/visorchipset/globals.h create mode 100644 kernel/drivers/staging/unisys/visorchipset/parser.c create mode 100644 kernel/drivers/staging/unisys/visorchipset/parser.h create mode 100644 kernel/drivers/staging/unisys/visorchipset/visorchipset.h create mode 100644 kernel/drivers/staging/unisys/visorchipset/visorchipset_main.c create mode 100644 kernel/drivers/staging/unisys/visorchipset/visorchipset_umode.h create mode 100644 kernel/drivers/staging/unisys/visorutil/Kconfig create mode 100644 kernel/drivers/staging/unisys/visorutil/Makefile create mode 100644 kernel/drivers/staging/unisys/visorutil/charqueue.c create mode 100644 kernel/drivers/staging/unisys/visorutil/charqueue.h create mode 100644 kernel/drivers/staging/unisys/visorutil/memregion.h create mode 100644 kernel/drivers/staging/unisys/visorutil/memregion_direct.c create mode 100644 kernel/drivers/staging/unisys/visorutil/periodic_work.c create mode 100644 kernel/drivers/staging/unisys/visorutil/visorkmodutils.c (limited to 'kernel/drivers/staging/unisys') diff --git a/kernel/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/kernel/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset new file mode 100644 index 000000000..28f8f1233 --- /dev/null +++ b/kernel/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset @@ -0,0 +1,101 @@ +What: install/error +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: used to send the ID of a string that should be displayed on + s-Par's automatic installation progress screen when an error + is encountered during installation. This field has no effect + if not in installation mode. +Users: sparmaintainer@unisys.com + +What: install/remainingsteps +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: used to set the value of the progress bar on the s-Par automatic + installation progress screen. This field has no effect if not in + installation mode. 
+Users: sparmaintainer@unisys.com + +What: install/textid +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: used to send the ID of a string that should be displayed on + s-Par's automatic installation progress screen. Setting this + field when not in installation mode (boottotool was set on + the previous guest boot) has no effect. +Users: sparmaintainer@unisys.com + +What: install/boottotool +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: The boottotool flag controls s-Par behavior on the next boot of + this guest. Setting the flag will cause the guest to boot from + the utility and installation image, which will use the value in + the toolaction field to determine what operation is being + requested. +Users: sparmaintainer@unisys.com + +What: install/toolaction +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: This field is used to tell s-Par which type of recovery tool + action to perform on the next guest boot-up. The meaning of the + value is dependent on the type of installation software used to + commission the guest. +Users: sparmaintainer@unisys.com + +What: guest/chipsetready +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: This entry is used by Unisys application software on the guest + to acknowledge completion of specific events for integration + purposes, but these acknowledgements are not required for the + guest to operate correctly. The interface accepts one of two + strings: MODULES_LOADED to indicate that the s-Par driver + modules have been loaded successfully, or CALLHOMEDISK_MOUNTED, + which indicates that the disk used to support call home services + has been successfully mounted. +Users: sparmaintainer@unisys.com + +What: parahotplug/deviceenabled +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: This entry is used by a Unisys support script installed on the + guest, and triggered by a udev event. The support script is + responsible for enabling and disabling SR-IOV devices when the + PF device is being recovered in another guest. + + Some SR-IOV devices have problems when the PF is reset without + first disabling all VFs attached to that PF. s-Par handles this + situation by sending a message to guests using these VFs, and + the script will disable the device. When the PF is recovered, + another message is sent to the guests to re-enable the VFs. + + The parahotplug/deviceenabled interface is used to acknowledge + the recovery message. +Users: sparmaintainer@unisys.com + +What: parahotplug/devicedisabled +Date: 7/18/2014 +KernelVersion: TBD +Contact: sparmaintainer@unisys.com +Description: This entry is used by a Unisys support script installed on the + guest, and triggered by a udev event. The support script is + responsible for enabling and disabling SR-IOV devices when the + PF device is being recovered in another guest. + + Some SR-IOV devices have problems when the PF is reset without + first disabling all VFs attached to that PF. s-Par handles this + situation by sending a message to guests using these VFs, and + the script will disable the device. When the PF is recovered, + another message is sent to the guests to re-enable the VFs. + + The parahotplug/devicedisabled interface is used to acknowledge + the initial recovery message. 
+Users: sparmaintainer@unisys.com diff --git a/kernel/drivers/staging/unisys/Documentation/overview.txt b/kernel/drivers/staging/unisys/Documentation/overview.txt new file mode 100644 index 000000000..8d078e4de --- /dev/null +++ b/kernel/drivers/staging/unisys/Documentation/overview.txt @@ -0,0 +1,174 @@ + +Overview + +This document describes the driver set for Unisys Secure Partitioning (s-Par®). + +s-Par is firmware that provides hardware partitioning capabilities for +splitting large-scale Intel x86 servers into multiple isolated +partitions. s-Par provides a set of para-virtualized device drivers to +allow guest partitions on the same server to share devices that would +normally be unsharable; specifically, PCI network interfaces and host +bus adapters that do not support shared access via SR-IOV. The shared +device is owned and managed by a small, single-purpose service +partition, which communicates with each guest partition sharing that +device through an area of shared memory called a channel. Additional +drivers provide support interfaces for communicating with s-Par +services, logging and diagnostics, and accessing the Linux console +from the s-Par user interface. + +The driver stack consists of a set of support modules, a set of bus +modules, and a set of device driver modules. The support modules +handle a number of common functions across each of the other +drivers. The bus modules provide organization for the device driver +modules, which provide the shared device functionality. + +These drivers are for the Unisys virtual PCI hardware model where the +hypervisor need not intervene (other than normal interrupt handling) +in the interactions between the client drivers and the virtual adapter +firmware in the adapter service partition. + +Driver Descriptions + +Device Modules + +The modules in this section handle shared devices and the virtual +buses required to support them. These modules use functions in and +depend on the modules described in the support modules section. + +visorchipset + +The visorchipset module receives device creation and destruction +events from the Command service partition of s-Par, as well as +controlling registration of shared device drivers with the s-Par +driver core. The events received are used to populate other s-Par +modules with their assigned shared devices. Visorchipset is required +for shared device drivers to function properly. Visorchipset also +stores information for handling dump disk device creation during +kdump. + +In operation, the visorchipset module processes device creation and +destruction messages sent by s-Par's Command service partition through +a channel. These messages result in creation (or destruction) of each +virtual bus and virtual device. Each bus and device is also associated +with a communication channel, which is used to communicate with one or +more IO service partitions to perform device IO on behalf of the +guest. + +virthba + +The virthba module provides access to a shared SCSI host bus adapter +and one or more disk devices, by proxying SCSI commands between the +guest and the service partition that owns the shared SCSI adapter, +using a channel between the guest and the service partition. The disks +that appear on the shared bus are defined by the s-Par configuration +and enforced by the service partition, while the guest driver handles +sending commands and handling responses. Each disk is shared as a +whole to a guest. 
Sharing the bus adapter in this way provides +resiliency; should the device encounter an error, only the service +partition is rebooted, and the device is reinitialized. This allows +guests to continue running and to recover from the error. + +virtnic + +The virtnic module provides a paravirtualized network interface to a +guest by proxying buffer information between the guest and the service +partition that owns the shared network interface, using a channel +between the guest and the service partition. The connectivity of this +interface with the shared interface and possibly other guest +partitions is defined by the s-Par configuration and enforced by the +service partition; the guest driver handles communication and link +status. + +visorserial + +The visorserial module allows the console of the linux guest to be +accessed via the s-Par console serial channel. It creates devices in +/dev/visorserialclientX which behave like a serial terminal and are +connected to the diagnostics system in s-Par. By assigning a getty to +the terminal in the guest, a user could log into and access the guest +from the s-Par diagnostics SWITCH RUN terminal. + +visorbus + +The visorbus module handles the bus functions for most functional +drivers except visorserial, visordiag, virthba, and virtnic. It +maintains the sysfs subtree /sys/devices/visorbus*/. It is responsible +for device creation and destruction of the devices on its bus. + +visorclientbus + +The visorclientbus module forwards the bus functions for virthba, and +virtnic to the virtpci driver. + +virtpci + +The virtpci module handles the bus functions for virthba, and virtnic. + +s-Par Integration Modules + +The modules in this section provide integration with s-Par guest +partition services like diagnostics and remote desktop. These modules +depend on functions in the modules described in the support modules +section. + +visorvideoclient + +The visorvideoclient module provides functionality for video support +for the Unisys s-Par Partition Desktop application. The guest OS must +also have the UEFI GOP protocol enabled for the partition desktop to +function. visorconinclient The visorconinclient module provides +keyboard and mouse support for the Unisys s-Par Partition Desktop +application. + +sparstop + +The sparstop module handles requests from the Unisys s-Par platform to +shutdown the linux guest. It allows a program on the guest to perform +clean-up functions on the guest before the guest is shut down or +rebooted using ACPI. + +visordiag + +This driver provides the ability for the guest to write information +into the s-Par diagnostics subsystem. It creates a set of devices +named /dev/visordiag.X which can be written to by the guest to add +text to the s-Par system log. + +Support Modules + +The modules described in this section provide functions and +abstractions to support the modules described in the previous +sections, to avoid having duplicated functionality. + +visornoop + +The visornoop module is a placeholder that responds to device +create/destroy messages that are currently not in use by linux guests. + +visoruislib + +The visoruislib module is a support library, used to handle requests +from virtpci. + +visorchannelstub + +The visorchannelstub module provides support routines for storing and +retrieving data from a channel. + +visorchannel + +The visorchannel module is a support library that abstracts reading +and writing a channel in memory. 
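As a minimal usage sketch (illustrative only, not part of the patch itself): a client of one of these channels might validate it with the helpers added in channel.h later in this patch before reading or writing it. The function name example_channel_usable, the "example" label, and the version value 1 are hypothetical placeholders; spar_check_channel_client(), SPAR_CHANNEL_SERVER_READY(), struct channel_header, and ULTRA_CHANNEL_PROTOCOL_SIGNATURE are the definitions this patch introduces.

	/* Illustrative sketch only (assumes channel.h from this patch is
	 * included). Returns nonzero when the mapped channel looks valid
	 * and the server side has marked it ready for use.
	 */
	static int example_channel_usable(void __iomem *ch, uuid_le expected_type)
	{
		struct channel_header __iomem *hdr = ch;

		/* Check the type GUID, minimum size, version and signature. */
		if (!spar_check_channel_client(ch, expected_type, "example",
					       sizeof(struct channel_header),
					       1 /* placeholder version */,
					       ULTRA_CHANNEL_PROTOCOL_SIGNATURE))
			return 0;

		/* Only touch the channel once the server has initialized it. */
		return SPAR_CHANNEL_SERVER_READY(hdr);
	}
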
+ +visorutil + +The visorutil module is a support library required by all other s-Par +driver modules. Among its features it abstracts reading, writing, and +manipulating a block of memory. + +Minimum Required Driver Set + +The drivers required to boot a Linux guest are visorchipset, visorbus, +visorvideoclient, visorconinclient, visoruislib, visorchannelstub, +visorchannel, and visorutil. The other drivers are required by the +product configurations that are currently being marketed. diff --git a/kernel/drivers/staging/unisys/Documentation/proc-entries.txt b/kernel/drivers/staging/unisys/Documentation/proc-entries.txt new file mode 100644 index 000000000..426f92b1c --- /dev/null +++ b/kernel/drivers/staging/unisys/Documentation/proc-entries.txt @@ -0,0 +1,93 @@ + s-Par Proc Entries +This document describes the proc entries created by the Unisys s-Par modules. + +Support Module Entries +These entries are provided primarily for debugging. + +/proc/uislib/info: This entry contains debugging information for the +uislib module, including bus information and memory usage. + +/proc/visorchipset/controlvm: This directory contains debugging +entries for the controlvm channel used by visorchipset. + +/proc/uislib/platform: This entry is used to display the platform +number this node is in the system. For some guests, this may be +invalid. + +/proc/visorchipset/chipsetready: This entry is written to by scripts +to signify that any user level activity has been completed before the +guest can be considered running and is shown as running in the s-Par +UI. + +Device Entries +These entries provide status of the devices shared by a service partition. + +/proc/uislib/vbus: this is a directory containing entries for each +virtual bus. Each numbered sub-directory contains an info entry, which +describes the devices that appear on that bus. + +/proc/uislib/cycles_before_wait: This entry is used to tune +performance, by setting the number of cycles we wait before going idle +when in polling mode. A longer time will reduce message latency but +spend more processing time polling. + +/proc/uislib/smart_wakeup: This entry is used to tune performance, by +enabling or disabling smart wakeup. + +/proc/virthba/info: This entry contains debugging information for the +virthba module, including interrupt information and memory usage. + +/proc/virthba/enable_ints: This entry controls interrupt use by the +virthba module. Writing a 0 to this entry will disable interrupts. + +/proc/virtnic/info: This entry contains debugging information for the +virtnic module, including interrupt information, send and receive +counts, and other device information. + +/proc/virtnic/ethX: This is a directory containing entries for each +virtual NIC. Each named subdirectory contains two entries, +clientstring and zone. + +/proc/virtpci/info: This entry contains debugging information for the +virtpci module, including virtual PCI bus information and device +locations. + +/proc/virtnic/enable_ints: This entry controls interrupt use by the +virtnic module. Writing a 0 to this entry will disable interrupts. + +Visorconinclient, visordiag, visornoop, visorserialclient, and +visorvideoclient Entries + +The entries in proc for these modules all follow the same +pattern. Each module has its own proc directory with the same name, +e.g. visordiag presents a /proc/visordiag directory. Inside of the +module's directory are a device directory, which contains one numbered +directory for each device provided by that module. 
Each device has a +diag entry that presents the device number and visorbus name for that +device. The module directory also has a driver/diag entry, which +reports the corresponding s-Par version number of the driver. + +Automated Installation Entries + +These entries are used to pass information between the s-Par platform +and the Linux-based installation and recovery tool. These values are +read/write, however, the guest can only reset them to 0, or report an +error status through the installer entry. The values are only set via +s-Par's firmware interface, to help prevent accidentally booting into +the tool. + +/proc/visorchipset/boottotool: This entry instructs s-Par that the +next reboot will launch the installation and recovery tool. If set to +0, the next boot will happen according to the UEFI boot manager +settings. + +/proc/visorchipset/toolaction: This entry indicates the installation +and recovery tool mode requested for the next boot. + +/proc/visorchipset/installer: this entry is used by the installation +and recovery tool to pass status and result information back to the +s-Par firmware. + +/proc/visorchipset/partition: This directory contains the guest +partition configuration data for each virtual bus, for use during +installation and at runtime for s-Par service partitions. diff --git a/kernel/drivers/staging/unisys/Kconfig b/kernel/drivers/staging/unisys/Kconfig new file mode 100644 index 000000000..19fcb3465 --- /dev/null +++ b/kernel/drivers/staging/unisys/Kconfig @@ -0,0 +1,19 @@ +# +# Unisys SPAR driver configuration +# +menuconfig UNISYSSPAR + bool "Unisys SPAR driver support" + depends on X86_64 + ---help--- + Support for the Unisys SPAR drivers + +if UNISYSSPAR + +source "drivers/staging/unisys/visorutil/Kconfig" +source "drivers/staging/unisys/visorchannel/Kconfig" +source "drivers/staging/unisys/visorchipset/Kconfig" +source "drivers/staging/unisys/uislib/Kconfig" +source "drivers/staging/unisys/virtpci/Kconfig" +source "drivers/staging/unisys/virthba/Kconfig" + +endif # UNISYSSPAR diff --git a/kernel/drivers/staging/unisys/MAINTAINERS b/kernel/drivers/staging/unisys/MAINTAINERS new file mode 100644 index 000000000..c9cef0b91 --- /dev/null +++ b/kernel/drivers/staging/unisys/MAINTAINERS @@ -0,0 +1,6 @@ +Unisys s-Par drivers +M: Ben Romer +S: Maintained +F: Documentation/s-Par/overview.txt +F: Documentation/s-Par/proc-entries.txt +F: drivers/staging/unisys/ diff --git a/kernel/drivers/staging/unisys/Makefile b/kernel/drivers/staging/unisys/Makefile new file mode 100644 index 000000000..68b9925e7 --- /dev/null +++ b/kernel/drivers/staging/unisys/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for Unisys SPAR drivers +# +obj-$(CONFIG_UNISYS_VISORUTIL) += visorutil/ +obj-$(CONFIG_UNISYS_VISORCHANNEL) += visorchannel/ +obj-$(CONFIG_UNISYS_VISORCHIPSET) += visorchipset/ +obj-$(CONFIG_UNISYS_UISLIB) += uislib/ +obj-$(CONFIG_UNISYS_VIRTPCI) += virtpci/ +obj-$(CONFIG_UNISYS_VIRTHBA) += virthba/ diff --git a/kernel/drivers/staging/unisys/TODO b/kernel/drivers/staging/unisys/TODO new file mode 100644 index 000000000..034ac61c4 --- /dev/null +++ b/kernel/drivers/staging/unisys/TODO @@ -0,0 +1,21 @@ +TODO: + -checkpatch warnings + -move /proc entries to /sys + -proper major number(s) + -add other drivers needed for full functionality: + -visorclientbus + -visorbus + -visordiag + -virtnic + -visornoop + -visorserial + -visorvideoclient + -visorconinclient + -sparstop + -move individual drivers into proper driver subsystems + + +Patches to: + Greg Kroah-Hartman + Ken Cox + Unisys s-Par 
maintainer mailing list diff --git a/kernel/drivers/staging/unisys/common-spar/include/channels/channel.h b/kernel/drivers/staging/unisys/common-spar/include/channels/channel.h new file mode 100644 index 000000000..6fb6e5b3d --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/channels/channel.h @@ -0,0 +1,590 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __CHANNEL_H__ +#define __CHANNEL_H__ + +#include +#include +#include + +/* +* Whenever this file is changed a corresponding change must be made in +* the Console/ServicePart/visordiag_early/supervisor_channel.h file +* which is needed for Linux kernel compiles. These two files must be +* in sync. +*/ + +/* define the following to prevent include nesting in kernel header + * files of similar abbreviated content + */ +#define __SUPERVISOR_CHANNEL_H__ + +#define SIGNATURE_16(A, B) ((A) | (B<<8)) +#define SIGNATURE_32(A, B, C, D) \ + (SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16)) +#define SIGNATURE_64(A, B, C, D, E, F, G, H) \ + (SIGNATURE_32(A, B, C, D) | ((u64)(SIGNATURE_32(E, F, G, H)) << 32)) + +#ifndef lengthof +#define lengthof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER)) +#endif +#ifndef COVERQ +#define COVERQ(v, d) (((v)+(d)-1) / (d)) +#endif +#ifndef COVER +#define COVER(v, d) ((d)*COVERQ(v, d)) +#endif + +#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L') + +enum channel_serverstate { + CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */ + CHANNELSRV_READY = 1 /* channel has been initialized by server */ +}; + +enum channel_clientstate { + CHANNELCLI_DETACHED = 0, + CHANNELCLI_DISABLED = 1, /* client can see channel but is NOT + * allowed to use it unless given TBD + * explicit request (should actually be + * < DETACHED) */ + CHANNELCLI_ATTACHING = 2, /* legacy EFI client request + * for EFI server to attach */ + CHANNELCLI_ATTACHED = 3, /* idle, but client may want + * to use channel any time */ + CHANNELCLI_BUSY = 4, /* client either wants to use or is + * using channel */ + CHANNELCLI_OWNED = 5 /* "no worries" state - client can + * access channel anytime */ +}; + +static inline const u8 * +ULTRA_CHANNELCLI_STRING(u32 v) +{ + switch (v) { + case CHANNELCLI_DETACHED: + return (const u8 *)("DETACHED"); + case CHANNELCLI_DISABLED: + return (const u8 *)("DISABLED"); + case CHANNELCLI_ATTACHING: + return (const u8 *)("ATTACHING"); + case CHANNELCLI_ATTACHED: + return (const u8 *)("ATTACHED"); + case CHANNELCLI_BUSY: + return (const u8 *)("BUSY"); + case CHANNELCLI_OWNED: + return (const u8 *)("OWNED"); + default: + break; + } + return (const u8 *)("?"); +} + +#define SPAR_CHANNEL_SERVER_READY(ch) \ + (readl(&(ch)->srv_state) == CHANNELSRV_READY) + +#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \ + (((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \ + (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DISABLED)) || \ + (((o) == CHANNELCLI_ATTACHED) && ((n) == 
CHANNELCLI_DISABLED)) || \ + (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DETACHED)) || \ + (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DETACHED)) || \ + (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHING)) || \ + (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_ATTACHED)) || \ + (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHED)) || \ + (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_ATTACHED)) || \ + (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_BUSY)) || \ + (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_OWNED)) || \ + (((o) == CHANNELCLI_DISABLED) && ((n) == CHANNELCLI_OWNED)) || \ + (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_OWNED)) || \ + (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_OWNED)) || \ + (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \ + ? (1) : (0)) + +#define SPAR_CHANNEL_CLIENT_CHK_TRANSITION(old, new, id, log, \ + file, line) \ + do { \ + if (!ULTRA_VALID_CHANNELCLI_TRANSITION(old, new)) \ + pr_info("%s Channel StateTransition INVALID! (%s) %s(%d)-->%s(%d) @%s:%d\n", \ + id, "CliState", \ + ULTRA_CHANNELCLI_STRING(old), \ + old, \ + ULTRA_CHANNELCLI_STRING(new), \ + new, \ + pathname_last_n_nodes((u8 *)file, 4), \ + line); \ + } while (0) + +#define SPAR_CHANNEL_CLIENT_TRANSITION(ch, id, newstate, log) \ + do { \ + SPAR_CHANNEL_CLIENT_CHK_TRANSITION( \ + readl(&(((struct channel_header __iomem *)\ + (ch))->cli_state_os)), \ + newstate, id, log, __FILE__, __LINE__); \ + pr_info("%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n", \ + id, "CliStateOS", \ + ULTRA_CHANNELCLI_STRING( \ + readl(&((struct channel_header __iomem *)\ + (ch))->cli_state_os)), \ + readl(&((struct channel_header __iomem *)\ + (ch))->cli_state_os), \ + ULTRA_CHANNELCLI_STRING(newstate), \ + newstate, \ + pathname_last_n_nodes(__FILE__, 4), __LINE__); \ + writel(newstate, &((struct channel_header __iomem *)\ + (ch))->cli_state_os); \ + mb(); /* required for channel synch */ \ + } while (0) + +/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorBoot: */ +/* throttling invalid boot channel statetransition error due to client + * disabled */ +#define ULTRA_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01 + +/* throttling invalid boot channel statetransition error due to client + * not attached */ +#define ULTRA_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02 + +/* throttling invalid boot channel statetransition error due to busy channel */ +#define ULTRA_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04 + +/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorOS: */ +/* throttling invalid guest OS channel statetransition error due to + * client disabled */ +#define ULTRA_CLIERROROS_THROTTLEMSG_DISABLED 0x01 + +/* throttling invalid guest OS channel statetransition error due to + * client not attached */ +#define ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED 0x02 + +/* throttling invalid guest OS channel statetransition error due to + * busy channel */ +#define ULTRA_CLIERROROS_THROTTLEMSG_BUSY 0x04 + +/* Values for ULTRA_CHANNEL_PROTOCOL.Features: This define exists so +* that windows guest can look at the FeatureFlags in the io channel, +* and configure the windows driver to use interrupts or not based on +* this setting. This flag is set in uislib after the +* ULTRA_VHBA_init_channel is called. All feature bits for all +* channels should be defined here. 
The io channel feature bits are +* defined right here */ +#define ULTRA_IO_DRIVER_ENABLES_INTS (0x1ULL << 1) +#define ULTRA_IO_CHANNEL_IS_POLLING (0x1ULL << 3) +#define ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS (0x1ULL << 4) +#define ULTRA_IO_DRIVER_DISABLES_INTS (0x1ULL << 5) +#define ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6) + +#pragma pack(push, 1) /* both GCC and VC now allow this pragma */ +/* Common Channel Header */ +struct channel_header { + u64 signature; /* Signature */ + u32 legacy_state; /* DEPRECATED - being replaced by */ + /* / SrvState, CliStateBoot, and CliStateOS below */ + u32 header_size; /* sizeof(struct channel_header) */ + u64 size; /* Total size of this channel in bytes */ + u64 features; /* Flags to modify behavior */ + uuid_le chtype; /* Channel type: data, bus, control, etc. */ + u64 partition_handle; /* ID of guest partition */ + u64 handle; /* Device number of this channel in client */ + u64 ch_space_offset; /* Offset in bytes to channel specific area */ + u32 version_id; /* struct channel_header Version ID */ + u32 partition_index; /* Index of guest partition */ + uuid_le zone_uuid; /* Guid of Channel's zone */ + u32 cli_str_offset; /* offset from channel header to + * nul-terminated ClientString (0 if + * ClientString not present) */ + u32 cli_state_boot; /* CHANNEL_CLIENTSTATE of pre-boot + * EFI client of this channel */ + u32 cmd_state_cli; /* CHANNEL_COMMANDSTATE (overloaded in + * Windows drivers, see ServerStateUp, + * ServerStateDown, etc) */ + u32 cli_state_os; /* CHANNEL_CLIENTSTATE of Guest OS + * client of this channel */ + u32 ch_characteristic; /* CHANNEL_CHARACTERISTIC_ */ + u32 cmd_state_srv; /* CHANNEL_COMMANDSTATE (overloaded in + * Windows drivers, see ServerStateUp, + * ServerStateDown, etc) */ + u32 srv_state; /* CHANNEL_SERVERSTATE */ + u8 cli_error_boot; /* bits to indicate err states for + * boot clients, so err messages can + * be throttled */ + u8 cli_error_os; /* bits to indicate err states for OS + * clients, so err messages can be + * throttled */ + u8 filler[1]; /* Pad out to 128 byte cacheline */ + /* Please add all new single-byte values below here */ + u8 recover_channel; +}; + +#define ULTRA_CHANNEL_ENABLE_INTS (0x1ULL << 0) + +/* Subheader for the Signal Type variation of the Common Channel */ +struct signal_queue_header { + /* 1st cache line */ + u32 version; /* SIGNAL_QUEUE_HEADER Version ID */ + u32 chtype; /* Queue type: storage, network */ + u64 size; /* Total size of this queue in bytes */ + u64 sig_base_offset; /* Offset to signal queue area */ + u64 features; /* Flags to modify behavior */ + u64 num_sent; /* Total # of signals placed in this queue */ + u64 num_overflows; /* Total # of inserts failed due to + * full queue */ + u32 signal_size; /* Total size of a signal for this queue */ + u32 max_slots; /* Max # of slots in queue, 1 slot is + * always empty */ + u32 max_signals; /* Max # of signals in queue + * (MaxSignalSlots-1) */ + u32 head; /* Queue head signal # */ + /* 2nd cache line */ + u64 num_received; /* Total # of signals removed from this queue */ + u32 tail; /* Queue tail signal # (on separate + * cache line) */ + u32 reserved1; /* Reserved field */ + u64 reserved2; /* Reserved field */ + u64 client_queue; + u64 num_irq_received; /* Total # of Interrupts received. This + * is incremented by the ISR in the + * guest windows driver */ + u64 num_empty; /* Number of times that visor_signal_remove + * is called and returned Empty + * Status. 
*/ + u32 errorflags; /* Error bits set during SignalReinit + * to denote trouble with client's + * fields */ + u8 filler[12]; /* Pad out to 64 byte cacheline */ +}; + +#pragma pack(pop) + +#define spar_signal_init(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \ + do { \ + memset(&chan->QHDRFLD, 0, sizeof(chan->QHDRFLD)); \ + chan->QHDRFLD.version = ver; \ + chan->QHDRFLD.chtype = typ; \ + chan->QHDRFLD.size = sizeof(chan->QDATAFLD); \ + chan->QHDRFLD.signal_size = sizeof(QDATATYPE); \ + chan->QHDRFLD.sig_base_offset = (u64)(chan->QDATAFLD)- \ + (u64)(&chan->QHDRFLD); \ + chan->QHDRFLD.max_slots = \ + sizeof(chan->QDATAFLD)/sizeof(QDATATYPE); \ + chan->QHDRFLD.max_signals = chan->QHDRFLD.max_slots-1; \ + } while (0) + +/* Generic function useful for validating any type of channel when it is + * received by the client that will be accessing the channel. + * Note that is only needed for callers in the EFI environment, and + * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages. + */ +static inline int +spar_check_channel_client(void __iomem *ch, + uuid_le expected_uuid, + char *chname, + u64 expected_min_bytes, + u32 expected_version, + u64 expected_signature) +{ + if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) { + uuid_le guid; + + memcpy_fromio(&guid, + &((struct channel_header __iomem *)(ch))->chtype, + sizeof(guid)); + /* caller wants us to verify type GUID */ + if (uuid_le_cmp(guid, expected_uuid) != 0) { + pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n", + chname, &expected_uuid, + &expected_uuid, &guid); + return 0; + } + } + if (expected_min_bytes > 0) { /* caller wants us to verify + * channel size */ + unsigned long long bytes = + readq(&((struct channel_header __iomem *) + (ch))->size); + if (bytes < expected_min_bytes) { + pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n", + chname, &expected_uuid, + (unsigned long long)expected_min_bytes, bytes); + return 0; + } + } + if (expected_version > 0) { /* caller wants us to verify + * channel version */ + unsigned long ver = readl(&((struct channel_header __iomem *) + (ch))->version_id); + if (ver != expected_version) { + pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8lx\n", + chname, &expected_uuid, + (unsigned long)expected_version, ver); + return 0; + } + } + if (expected_signature > 0) { /* caller wants us to verify + * channel signature */ + unsigned long long sig = + readq(&((struct channel_header __iomem *) + (ch))->signature); + if (sig != expected_signature) { + pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8llx actual=0x%-8.8llx\n", + chname, &expected_uuid, + expected_signature, sig); + return 0; + } + } + return 1; +} + +/* Generic function useful for validating any type of channel when it is about + * to be initialized by the server of the channel. + * Note that is only needed for callers in the EFI environment, and + * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages. 
+ */ +static inline int spar_check_channel_server(uuid_le typeuuid, char *name, + u64 expected_min_bytes, + u64 actual_bytes) +{ + if (expected_min_bytes > 0) /* caller wants us to verify + * channel size */ + if (actual_bytes < expected_min_bytes) { + pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8llx actual=0x%-8.8llx\n", + name, &typeuuid, expected_min_bytes, + actual_bytes); + return 0; + } + return 1; +} + +/* Given a file pathname (with '/' or '\' separating directory nodes), + * returns a pointer to the beginning of a node within that pathname such + * that the number of nodes from that pointer to the end of the string is + * NOT more than . Note that if the pathname has less than nodes + * in it, the return pointer will be to the beginning of the string. + */ +static inline u8 * +pathname_last_n_nodes(u8 *s, unsigned int n) +{ + u8 *p = s; + unsigned int node_count = 0; + + while (*p != '\0') { + if ((*p == '/') || (*p == '\\')) + node_count++; + p++; + } + if (node_count <= n) + return s; + while (n > 0) { + p--; + if (p == s) + break; /* should never happen, unless someone + * is changing the string while we are + * looking at it!! */ + if ((*p == '/') || (*p == '\\')) + n--; + } + return p + 1; +} + +static inline int +spar_channel_client_acquire_os(void __iomem *ch, u8 *id) +{ + struct channel_header __iomem *hdr = ch; + + if (readl(&hdr->cli_state_os) == CHANNELCLI_DISABLED) { + if ((readb(&hdr->cli_error_os) + & ULTRA_CLIERROROS_THROTTLEMSG_DISABLED) == 0) { + /* we are NOT throttling this message */ + writeb(readb(&hdr->cli_error_os) | + ULTRA_CLIERROROS_THROTTLEMSG_DISABLED, + &hdr->cli_error_os); + /* throttle until acquire successful */ + + pr_info("%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED\n", + id); + } + return 0; + } + if ((readl(&hdr->cli_state_os) != CHANNELCLI_OWNED) && + (readl(&hdr->cli_state_boot) == CHANNELCLI_DISABLED)) { + /* Our competitor is DISABLED, so we can transition to OWNED */ + pr_info("%s Channel StateTransition (%s) %s(%d)-->%s(%d)\n", + id, "cli_state_os", + ULTRA_CHANNELCLI_STRING(readl(&hdr->cli_state_os)), + readl(&hdr->cli_state_os), + ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED), + CHANNELCLI_OWNED); + writel(CHANNELCLI_OWNED, &hdr->cli_state_os); + mb(); /* required for channel synch */ + } + if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) { + if (readb(&hdr->cli_error_os) != 0) { + /* we are in an error msg throttling state; + * come out of it */ + pr_info("%s Channel OS client acquire now successful\n", + id); + writeb(0, &hdr->cli_error_os); + } + return 1; + } + + /* We have to do it the "hard way". We transition to BUSY, + * and can use the channel iff our competitor has not also + * transitioned to BUSY. */ + if (readl(&hdr->cli_state_os) != CHANNELCLI_ATTACHED) { + if ((readb(&hdr->cli_error_os) + & ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED) == 0) { + /* we are NOT throttling this message */ + writeb(readb(&hdr->cli_error_os) | + ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED, + &hdr->cli_error_os); + /* throttle until acquire successful */ + pr_info("%s Channel StateTransition INVALID! 
- acquire failed because OS client NOT ATTACHED (state=%s(%d))\n", + id, ULTRA_CHANNELCLI_STRING( + readl(&hdr->cli_state_os)), + readl(&hdr->cli_state_os)); + } + return 0; + } + writel(CHANNELCLI_BUSY, &hdr->cli_state_os); + mb(); /* required for channel synch */ + if (readl(&hdr->cli_state_boot) == CHANNELCLI_BUSY) { + if ((readb(&hdr->cli_error_os) + & ULTRA_CLIERROROS_THROTTLEMSG_BUSY) == 0) { + /* we are NOT throttling this message */ + writeb(readb(&hdr->cli_error_os) | + ULTRA_CLIERROROS_THROTTLEMSG_BUSY, + &hdr->cli_error_os); + /* throttle until acquire successful */ + pr_info("%s Channel StateTransition failed - host OS acquire failed because boot BUSY\n", + id); + } + /* reset busy */ + writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os); + mb(); /* required for channel synch */ + return 0; + } + if (readb(&hdr->cli_error_os) != 0) { + /* we are in an error msg throttling state; come out of it */ + pr_info("%s Channel OS client acquire now successful\n", id); + writeb(0, &hdr->cli_error_os); + } + return 1; +} + +static inline void +spar_channel_client_release_os(void __iomem *ch, u8 *id) +{ + struct channel_header __iomem *hdr = ch; + + if (readb(&hdr->cli_error_os) != 0) { + /* we are in an error msg throttling state; come out of it */ + pr_info("%s Channel OS client error state cleared\n", id); + writeb(0, &hdr->cli_error_os); + } + if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) + return; + if (readl(&hdr->cli_state_os) != CHANNELCLI_BUSY) { + pr_info("%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d))\n", + id, ULTRA_CHANNELCLI_STRING( + readl(&hdr->cli_state_os)), + readl(&hdr->cli_state_os)); + /* return; */ + } + writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os); /* release busy */ +} + +/* +* Routine Description: +* Tries to insert the prebuilt signal pointed to by pSignal into the nth +* Queue of the Channel pointed to by pChannel +* +* Parameters: +* pChannel: (IN) points to the IO Channel +* Queue: (IN) nth Queue of the IO Channel +* pSignal: (IN) pointer to the signal +* +* Assumptions: +* - pChannel, Queue and pSignal are valid. +* - If insertion fails due to a full queue, the caller will determine the +* retry policy (e.g. wait & try again, report an error, etc.). +* +* Return value: 1 if the insertion succeeds, 0 if the queue was +* full. +*/ + +unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue, + void *sig); + +/* +* Routine Description: +* Removes one signal from Channel pChannel's nth Queue at the +* time of the call and copies it into the memory pointed to by +* pSignal. +* +* Parameters: +* pChannel: (IN) points to the IO Channel +* Queue: (IN) nth Queue of the IO Channel +* pSignal: (IN) pointer to where the signals are to be copied +* +* Assumptions: +* - pChannel and Queue are valid. +* - pSignal points to a memory area large enough to hold queue's SignalSize +* +* Return value: 1 if the removal succeeds, 0 if the queue was +* empty. +*/ + +unsigned char spar_signal_remove(struct channel_header __iomem *ch, u32 queue, + void *sig); + +/* +* Routine Description: +* Removes all signals present in Channel pChannel's nth Queue at the +* time of the call and copies them into the memory pointed to by +* pSignal. Returns the # of signals copied as the value of the routine. +* +* Parameters: +* pChannel: (IN) points to the IO Channel +* Queue: (IN) nth Queue of the IO Channel +* pSignal: (IN) pointer to where the signals are to be copied +* +* Assumptions: +* - pChannel and Queue are valid. 
+* - pSignal points to a memory area large enough to hold Queue's MaxSignals +* # of signals, each of which is Queue's SignalSize. +* +* Return value: +* # of signals copied. +*/ +unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue, + void *sig); + +/* +* Routine Description: +* Determine whether a signal queue is empty. +* +* Parameters: +* pChannel: (IN) points to the IO Channel +* Queue: (IN) nth Queue of the IO Channel +* +* Return value: +* 1 if the signal queue is empty, 0 otherwise. +*/ +unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch, + u32 queue); + +#endif diff --git a/kernel/drivers/staging/unisys/common-spar/include/channels/channel_guid.h b/kernel/drivers/staging/unisys/common-spar/include/channels/channel_guid.h new file mode 100644 index 000000000..706363fc3 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/channels/channel_guid.h @@ -0,0 +1,61 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * CHANNEL Guids + */ + +/* Used in IOChannel + * {414815ed-c58c-11da-95a9-00e08161165f} + */ +#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \ + UUID_LE(0x414815ed, 0xc58c, 0x11da, \ + 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) +static const uuid_le spar_vhba_channel_protocol_uuid = + SPAR_VHBA_CHANNEL_PROTOCOL_UUID; + +/* Used in IOChannel + * {8cd5994d-c58e-11da-95a9-00e08161165f} + */ +#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \ + UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \ + 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) +static const uuid_le spar_vnic_channel_protocol_uuid = + SPAR_VNIC_CHANNEL_PROTOCOL_UUID; + +/* Used in IOChannel + * {72120008-4AAB-11DC-8530-444553544200} + */ +#define SPAR_SIOVM_UUID \ + UUID_LE(0x72120008, 0x4AAB, 0x11DC, \ + 0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00) +static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID; + +/* Used in visornoop/visornoop_main.c + * {5b52c5ac-e5f5-4d42-8dff-429eaecd221f} + */ +#define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID \ + UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \ + 0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f) + +static const uuid_le spar_controldirector_channel_protocol_uuid = + SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID; + +/* Used in visorchipset/visorchipset_main.c + * {B4E79625-AEDE-4EAA-9E11-D3EDDCD4504C} + */ +#define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID \ + UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \ + 0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c) diff --git a/kernel/drivers/staging/unisys/common-spar/include/channels/controlframework.h b/kernel/drivers/staging/unisys/common-spar/include/channels/controlframework.h new file mode 100644 index 000000000..33d9caf33 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/channels/controlframework.h @@ -0,0 +1,62 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * Module Name: + * controlframework.h + * + * Abstract: This file defines common structures in the unmanaged + * Ultravisor (mostly EFI) space. + * + */ + +#ifndef _CONTROL_FRAMEWORK_H_ +#define _CONTROL_FRAMEWORK_H_ + +#include +#include "channel.h" + +struct spar_segment_state { + u16 enabled:1; /* Bit 0: May enter other states */ + u16 active:1; /* Bit 1: Assigned to active partition */ + u16 alive:1; /* Bit 2: Configure message sent to + * service/server */ + u16 revoked:1; /* Bit 3: similar to partition state + * ShuttingDown */ + u16 allocated:1; /* Bit 4: memory (device/port number) + * has been selected by Command */ + u16 known:1; /* Bit 5: has been introduced to the + * service/guest partition */ + u16 ready:1; /* Bit 6: service/Guest partition has + * responded to introduction */ + u16 operating:1; /* Bit 7: resource is configured and + * operating */ + /* Note: don't use high bit unless we need to switch to ushort + * which is non-compliant */ +}; + +static const struct spar_segment_state segment_state_running = { + 1, 1, 1, 0, 1, 1, 1, 1 +}; + +static const struct spar_segment_state segment_state_paused = { + 1, 1, 1, 0, 1, 1, 1, 0 +}; + +static const struct spar_segment_state segment_state_standby = { + 1, 1, 0, 0, 1, 1, 1, 0 +}; + +#endif /* _CONTROL_FRAMEWORK_H_ not defined */ diff --git a/kernel/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h b/kernel/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h new file mode 100644 index 000000000..a66db7968 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h @@ -0,0 +1,511 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ */ + +#ifndef __CONTROLVMCHANNEL_H__ +#define __CONTROLVMCHANNEL_H__ + +#include +#include "channel.h" +#include "controlframework.h" + +typedef u64 GUEST_PHYSICAL_ADDRESS; + +enum { INVALID_GUEST_FIRMWARE, SAMPLE_GUEST_FIRMWARE, + TIANO32_GUEST_FIRMWARE, TIANO64_GUEST_FIRMWARE +}; + +/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */ +#define SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID \ + UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \ + 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d) + +static const uuid_le spar_controlvm_channel_protocol_uuid = + SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID; + +#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \ + ULTRA_CHANNEL_PROTOCOL_SIGNATURE +#define CONTROLVM_MESSAGE_MAX 64 + +/* Must increment this whenever you insert or delete fields within +* this channel struct. Also increment whenever you change the meaning +* of fields within this channel struct so as to break pre-existing +* software. Note that you can usually add fields to the END of the +* channel struct withOUT needing to increment this. */ +#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1 + +#define SPAR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \ + spar_check_channel_client(ch, \ + spar_controlvm_channel_protocol_uuid, \ + "controlvm", \ + sizeof(struct spar_controlvm_channel_protocol), \ + ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \ + ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE) + +#define MY_DEVICE_INDEX 0 +#define MAX_MACDATA_LEN 8 /* number of bytes for MAC address in config packet */ +#define MAX_SERIAL_NUM 32 + +#define DISK_ZERO_PUN_NUMBER 1 /* Target ID on the SCSI bus for LUN 0 */ +#define DISK_ZERO_LUN_NUMBER 3 /* Logical Unit Number */ + +/* Defines for various channel queues... */ +#define CONTROLVM_QUEUE_REQUEST 0 +#define CONTROLVM_QUEUE_RESPONSE 1 +#define CONTROLVM_QUEUE_EVENT 2 +#define CONTROLVM_QUEUE_ACK 3 + +/* Max number of messages stored during IOVM creation to be reused + * after crash */ +#define CONTROLVM_CRASHMSG_MAX 2 + +/** Ids for commands that may appear in either queue of a ControlVm channel. 
+ * + * Commands that are initiated by the command partition (CP), by an IO or + * console service partition (SP), or by a guest partition (GP)are: + * - issued on the RequestQueue queue (q #0) in the ControlVm channel + * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel + * + * Events that are initiated by an IO or console service partition (SP) or + * by a guest partition (GP) are: + * - issued on the EventQueue queue (q #2) in the ControlVm channel + * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel + */ +enum controlvm_id { + CONTROLVM_INVALID = 0, + /* SWITCH commands required Parameter: SwitchNumber */ + /* BUS commands required Parameter: BusNumber */ + CONTROLVM_BUS_CREATE = 0x101, /* CP --> SP, GP */ + CONTROLVM_BUS_DESTROY = 0x102, /* CP --> SP, GP */ + CONTROLVM_BUS_CONFIGURE = 0x104, /* CP --> SP */ + CONTROLVM_BUS_CHANGESTATE = 0x105, /* CP --> SP, GP */ + CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106, /* SP, GP --> CP */ +/* DEVICE commands required Parameter: BusNumber, DeviceNumber */ + + CONTROLVM_DEVICE_CREATE = 0x201, /* CP --> SP, GP */ + CONTROLVM_DEVICE_DESTROY = 0x202, /* CP --> SP, GP */ + CONTROLVM_DEVICE_CONFIGURE = 0x203, /* CP --> SP */ + CONTROLVM_DEVICE_CHANGESTATE = 0x204, /* CP --> SP, GP */ + CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205, /* SP, GP --> CP */ + CONTROLVM_DEVICE_RECONFIGURE = 0x206, /* CP --> Boot */ +/* DISK commands required Parameter: BusNumber, DeviceNumber */ + CONTROLVM_DISK_CREATE = 0x221, /* CP --> SP */ + CONTROLVM_DISK_DESTROY = 0x222, /* CP --> SP */ + CONTROLVM_DISK_CONFIGURE = 0x223, /* CP --> SP */ + CONTROLVM_DISK_CHANGESTATE = 0x224, /* CP --> SP */ +/* CHIPSET commands */ + CONTROLVM_CHIPSET_INIT = 0x301, /* CP --> SP, GP */ + CONTROLVM_CHIPSET_STOP = 0x302, /* CP --> SP, GP */ + CONTROLVM_CHIPSET_SHUTDOWN = 0x303, /* CP --> SP */ + CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */ + CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */ + +}; + +struct irq_info { + /**< specifies interrupt info. It is used to send interrupts + * for this channel. The peer at the end of this channel + * who has registered an interrupt (using recv fields + * above) will receive the interrupt. Passed as a parameter + * to Issue_VMCALL_IO_QUEUE_TRANSITION, which generates the + * interrupt. Currently this is used by IOPart-SP to wake + * up GP when Data Channel transitions from empty to + * non-empty.*/ + u64 send_irq_handle; + + /**< specifies interrupt handle. It is used to retrieve the + * corresponding interrupt pin from Monitor; and the + * interrupt pin is used to connect to the corresponding + * interrupt. Used by IOPart-GP only. */ + u64 recv_irq_handle; + + /**< specifies interrupt vector. It, interrupt pin, and shared are + * used to connect to the corresponding interrupt. Used by + * IOPart-GP only. */ + u32 recv_irq_vector; + + /**< specifies if the recvInterrupt is shared. It, interrupt pin + * and vector are used to connect to 0 = not shared; 1 = shared. + * the corresponding interrupt. Used by IOPart-GP only. 
*/ + u8 recv_irq_shared; + u8 reserved[3]; /* Natural alignment purposes */ +}; + +struct pci_id { + u16 domain; + u8 bus; + u8 slot; + u8 func; + u8 reserved[3]; /* Natural alignment purposes */ +}; + +struct efi_spar_indication { + u64 boot_to_fw_ui:1; /* Bit 0: Stop in uefi ui */ + u64 clear_nvram:1; /* Bit 1: Clear NVRAM */ + u64 clear_cmos:1; /* Bit 2: Clear CMOS */ + u64 boot_to_tool:1; /* Bit 3: Run install tool */ + /* remaining bits are available */ +}; + +enum ultra_chipset_feature { + ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001, + ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002, + ULTRA_CHIPSET_FEATURE_PCIVBUS = 0x00000004 +}; + +/** This is the common structure that is at the beginning of every + * ControlVm message (both commands and responses) in any ControlVm + * queue. Commands are easily distinguished from responses by + * looking at the flags.response field. + */ +struct controlvm_message_header { + u32 id; /* See CONTROLVM_ID. */ + /* For requests, indicates the message type. */ + /* For responses, indicates the type of message we are responding to. */ + + u32 message_size; /* Includes size of this struct + size + * of message */ + u32 segment_index; /* Index of segment containing Vm + * message/information */ + u32 completion_status; /* Error status code or result of + * message completion */ + struct { + u32 failed:1; /**< =1 in a response to * signify + * failure */ + u32 response_expected:1; /**< =1 in all messages that expect a + * response (Control ignores this + * bit) */ + u32 server:1; /**< =1 in all bus & device-related + * messages where the message + * receiver is to act as the bus or + * device server */ + u32 test_message:1; /**< =1 for testing use only + * (Control and Command ignore this + * bit) */ + u32 partial_completion:1; /**< =1 if there are forthcoming + * responses/acks associated + * with this message */ + u32 preserve:1; /**< =1 this is to let us know to + * preserve channel contents + * (for running guests)*/ + u32 writer_in_diag:1; /**< =1 the DiagWriter is active in the + * Diagnostic Partition*/ + } flags; + u32 reserved; /* Natural alignment */ + u64 message_handle; /* Identifies the particular message instance, + * and is used to match particular */ + /* request instances with the corresponding response instance. */ + u64 payload_vm_offset; /* Offset of payload area from start of this + * instance of ControlVm segment */ + u32 payload_max_bytes; /* Maximum bytes allocated in payload + * area of ControlVm segment */ + u32 payload_bytes; /* Actual number of bytes of payload + * area to copy between IO/Command; */ + /* if non-zero, there is a payload to copy. */ +}; + +struct controlvm_packet_device_create { + u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */ + u32 dev_no; /* bus-relative (0..n-1) device number */ + u64 channel_addr; /* Guest physical address of the channel, which + * can be dereferenced by the receiver of this + * ControlVm command */ + u64 channel_bytes; /* specifies size of the channel in bytes */ + uuid_le data_type_uuid; /* specifies format of data in channel */ + uuid_le dev_inst_uuid; /* instance guid for the device */ + struct irq_info intr; /* specifies interrupt information */ +}; /* for CONTROLVM_DEVICE_CREATE */ + +struct controlvm_packet_device_configure { + u32 bus_no; /**< bus # (0..n-1) from the msg + * receiver's perspective */ + + /* Control uses header SegmentIndex field to access bus number... 
*/ + u32 dev_no; /**< bus-relative (0..n-1) device number */ +} ; /* for CONTROLVM_DEVICE_CONFIGURE */ + +struct controlvm_message_device_create { + struct controlvm_message_header header; + struct controlvm_packet_device_create packet; +}; /* total 128 bytes */ + +struct controlvm_message_device_configure { + struct controlvm_message_header header; + struct controlvm_packet_device_configure packet; +}; /* total 56 bytes */ + +/* This is the format for a message in any ControlVm queue. */ +struct controlvm_message_packet { + union { + struct { + u32 bus_no; /* bus # (0..n-1) from the msg + * receiver's perspective */ + u32 dev_count; /* indicates the max number of + * devices on this bus */ + u64 channel_addr; /* Guest physical address of + * the channel, which can be + * dereferenced by the receiver + * of this ControlVm command */ + u64 channel_bytes; /* size of the channel */ + uuid_le bus_data_type_uuid; /* indicates format of + * data in bus channel*/ + uuid_le bus_inst_uuid; /* instance uuid for the bus */ + } create_bus; /* for CONTROLVM_BUS_CREATE */ + struct { + u32 bus_no; /* bus # (0..n-1) from the msg + * receiver's perspective */ + u32 reserved; /* Natural alignment purposes */ + } destroy_bus; /* for CONTROLVM_BUS_DESTROY */ + struct { + u32 bus_no; /* bus # (0..n-1) from the receiver's + * perspective */ + u32 reserved1; /* for alignment purposes */ + u64 guest_handle; /* This is used to convert + * guest physical address to + * physical address */ + u64 recv_bus_irq_handle; + /* specifies interrupt info. It is used by SP + * to register to receive interrupts from the + * CP. This interrupt is used for bus level + * notifications. The corresponding + * sendBusInterruptHandle is kept in CP. */ + } configure_bus; /* for CONTROLVM_BUS_CONFIGURE */ + /* for CONTROLVM_DEVICE_CREATE */ + struct controlvm_packet_device_create create_device; + struct { + u32 bus_no; /* bus # (0..n-1) from the msg + * receiver's perspective */ + u32 dev_no; /* bus-relative (0..n-1) device # */ + } destroy_device; /* for CONTROLVM_DEVICE_DESTROY */ + /* for CONTROLVM_DEVICE_CONFIGURE */ + struct controlvm_packet_device_configure configure_device; + struct { + u32 bus_no; /* bus # (0..n-1) from the msg + * receiver's perspective */ + u32 dev_no; /* bus-relative (0..n-1) device # */ + } reconfigure_device; /* for CONTROLVM_DEVICE_RECONFIGURE */ + struct { + u32 bus_no; + struct spar_segment_state state; + u8 reserved[2]; /* Natural alignment purposes */ + } bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */ + struct { + u32 bus_no; + u32 dev_no; + struct spar_segment_state state; + struct { + u32 phys_device:1; /* =1 if message is for + * a physical device */ + } flags; + u8 reserved[2]; /* Natural alignment purposes */ + } device_change_state; /* for CONTROLVM_DEVICE_CHANGESTATE */ + struct { + u32 bus_no; + u32 dev_no; + struct spar_segment_state state; + u8 reserved[6]; /* Natural alignment purposes */ + } device_change_state_event; + /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */ + struct { + u32 bus_count; /* indicates the max number of busses */ + u32 switch_count; /* indicates the max number of + * switches if a service partition */ + enum ultra_chipset_feature features; + u32 platform_number; /* Platform Number */ + } init_chipset; /* for CONTROLVM_CHIPSET_INIT */ + struct { + u32 options; /* reserved */ + u32 test; /* bit 0 set to run embedded selftest */ + } chipset_selftest; /* for CONTROLVM_CHIPSET_SELFTEST */ + u64 addr; /* a physical address of something, that can be + * dereferenced by 
the receiver of this + * ControlVm command (depends on command id) */ + u64 handle; /* a handle of something (depends on command + * id) */ + }; +}; + +/* All messages in any ControlVm queue have this layout. */ +struct controlvm_message { + struct controlvm_message_header hdr; + struct controlvm_message_packet cmd; +}; + +struct device_map { + GUEST_PHYSICAL_ADDRESS device_channel_address; + u64 device_channel_size; + u32 ca_index; + u32 reserved; /* natural alignment */ + u64 reserved2; /* Align structure on 32-byte boundary */ +}; + +struct guest_devices { + struct device_map video_channel; + struct device_map keyboard_channel; + struct device_map network_channel; + struct device_map storage_channel; + struct device_map console_channel; + u32 partition_index; + u32 pad; +}; + +struct spar_controlvm_channel_protocol { + struct channel_header header; + GUEST_PHYSICAL_ADDRESS gp_controlvm; /* guest physical address of + * this channel */ + GUEST_PHYSICAL_ADDRESS gp_partition_tables;/* guest physical address of + * partition tables */ + GUEST_PHYSICAL_ADDRESS gp_diag_guest; /* guest physical address of + * diagnostic channel */ + GUEST_PHYSICAL_ADDRESS gp_boot_romdisk;/* guest phys addr of (read + * only) Boot ROM disk */ + GUEST_PHYSICAL_ADDRESS gp_boot_ramdisk;/* guest phys addr of writable + * Boot RAM disk */ + GUEST_PHYSICAL_ADDRESS gp_acpi_table; /* guest phys addr of acpi + * table */ + GUEST_PHYSICAL_ADDRESS gp_control_channel;/* guest phys addr of control + * channel */ + GUEST_PHYSICAL_ADDRESS gp_diag_romdisk;/* guest phys addr of diagnostic + * ROM disk */ + GUEST_PHYSICAL_ADDRESS gp_nvram; /* guest phys addr of NVRAM + * channel */ + u64 request_payload_offset; /* Offset to request payload area */ + u64 event_payload_offset; /* Offset to event payload area */ + u32 request_payload_bytes; /* Bytes available in request payload + * area */ + u32 event_payload_bytes;/* Bytes available in event payload area */ + u32 control_channel_bytes; + u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */ + u32 message_bytes; /* sizeof(CONTROLVM_MESSAGE) */ + u32 message_count; /* CONTROLVM_MESSAGE_MAX */ + GUEST_PHYSICAL_ADDRESS gp_smbios_table;/* guest phys addr of SMBIOS + * tables */ + GUEST_PHYSICAL_ADDRESS gp_physical_smbios_table;/* guest phys addr of + * SMBIOS table */ + /* ULTRA_MAX_GUESTS_PER_SERVICE */ + struct guest_devices gp_obsolete_guest_devices[16]; + + /* guest physical address of EFI firmware image base */ + GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_image_base; + + /* guest physical address of EFI firmware entry point */ + GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_entry_point; + + /* guest EFI firmware image size */ + u64 virtual_guest_firmware_image_size; + + /* GPA = 1MB where EFI firmware image is copied to */ + GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_boot_base; + GUEST_PHYSICAL_ADDRESS virtual_guest_image_base; + GUEST_PHYSICAL_ADDRESS virtual_guest_image_size; + u64 prototype_control_channel_offset; + GUEST_PHYSICAL_ADDRESS virtual_guest_partition_handle; + + u16 restore_action; /* Restore Action field to restore the guest + * partition */ + u16 dump_action; /* For Windows guests it shows if the visordisk + * is running in dump mode */ + u16 nvram_fail_count; + u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */ + u32 saved_crash_message_offset; /* Offset to request payload area needed + * for crash dump */ + u32 installation_error; /* Type of error encountered during + * installation */ + u32 installation_text_id; /* Id of string to 
display */ + u16 installation_remaining_steps;/* Number of remaining installation + * steps (for progress bars) */ + u8 tool_action; /* ULTRA_TOOL_ACTIONS Installation Action + * field */ + u8 reserved; /* alignment */ + struct efi_spar_indication efi_spar_ind; + struct efi_spar_indication efi_spar_ind_supported; + u32 sp_reserved; + u8 reserved2[28]; /* Force signals to begin on 128-byte cache + * line */ + struct signal_queue_header request_queue;/* Service or guest partition + * uses this queue to send + * requests to Control */ + struct signal_queue_header response_queue;/* Control uses this queue to + * respond to service or guest + * partition requests */ + struct signal_queue_header event_queue; /* Control uses this queue to + * send events to service or + * guest partition */ + struct signal_queue_header event_ack_queue;/* Service or guest partition + * uses this queue to ack + * Control events */ + + /* Request fixed-size message pool - does not include payload */ + struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX]; + + /* Response fixed-size message pool - does not include payload */ + struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX]; + + /* Event fixed-size message pool - does not include payload */ + struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX]; + + /* Ack fixed-size message pool - does not include payload */ + struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX]; + + /* Message stored during IOVM creation to be reused after crash */ + struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX]; +}; + +/* Offsets for VM channel attributes... */ +#define VM_CH_REQ_QUEUE_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, request_queue) +#define VM_CH_RESP_QUEUE_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, response_queue) +#define VM_CH_EVENT_QUEUE_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, event_queue) +#define VM_CH_ACK_QUEUE_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, event_ack_queue) +#define VM_CH_REQ_MSG_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, request_msg) +#define VM_CH_RESP_MSG_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, response_msg) +#define VM_CH_EVENT_MSG_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, event_msg) +#define VM_CH_ACK_MSG_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, event_ack_msg) +#define VM_CH_CRASH_MSG_OFFSET \ + offsetof(struct spar_controlvm_channel_protocol, saved_crash_msg) + +/* The following header will be located at the beginning of PayloadVmOffset for + * various ControlVm commands. The receiver of a ControlVm command with a + * PayloadVmOffset will dereference this address and then use connection_offset, + * initiator_offset, and target_offset to get the location of UTF-8 formatted + * strings that can be parsed to obtain command-specific information. The value + * of total_length should equal PayloadBytes. The format of the strings at + * PayloadVmOffset will take different forms depending on the message. 
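+ *
+ * Illustrative note only (not part of the message definitions): a receiver
+ * that has validated these offsets against total_length could locate one of
+ * the UTF-8 strings roughly as follows, where "payload" is assumed to point
+ * at the area named by PayloadVmOffset:
+ *
+ *   struct spar_controlvm_parameters_header *ph = payload;
+ *   const char *initiator = (const char *)payload + ph->initiator_offset;
+ *   u32 initiator_len = ph->initiator_length;  (string is not NUL-terminated)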
+ */ +struct spar_controlvm_parameters_header { + u32 total_length; + u32 header_length; + u32 connection_offset; + u32 connection_length; + u32 initiator_offset; + u32 initiator_length; + u32 target_offset; + u32 target_length; + u32 client_offset; + u32 client_length; + u32 name_offset; + u32 name_length; + uuid_le id; + u32 revision; + u32 reserved; /* Natural alignment */ +}; + +#endif /* __CONTROLVMCHANNEL_H__ */ diff --git a/kernel/drivers/staging/unisys/common-spar/include/channels/diagchannel.h b/kernel/drivers/staging/unisys/common-spar/include/channels/diagchannel.h new file mode 100644 index 000000000..e8fb8678a --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/channels/diagchannel.h @@ -0,0 +1,427 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/*++ + * + * Module Name: + * + * diagchannel.h + * + * Abstract: + * + * This file defines the DiagChannel protocol. This protocol is used to aid in + * preserving event data sent by external applications. This protocol provides + * a region for event data to reside in. This data will eventually be sent to + * the Boot Partition where it will be committed to memory and/or disk. This + * file contains platform-independent data that can be built using any + * Supervisor build environment (Windows, Linux, EFI). + * +*/ + +#ifndef _DIAG_CHANNEL_H_ +#define _DIAG_CHANNEL_H_ + +#include +#include "channel.h" + +/* {EEA7A573-DB82-447c-8716-EFBEAAAE4858} */ +#define SPAR_DIAG_CHANNEL_PROTOCOL_UUID \ + UUID_LE(0xeea7a573, 0xdb82, 0x447c, \ + 0x87, 0x16, 0xef, 0xbe, 0xaa, 0xae, 0x48, 0x58) + +static const uuid_le spar_diag_channel_protocol_uuid = + SPAR_DIAG_CHANNEL_PROTOCOL_UUID; + +/* {E850F968-3263-4484-8CA5-2A35D087A5A8} */ +#define ULTRA_DIAG_ROOT_CHANNEL_PROTOCOL_GUID \ + UUID_LE(0xe850f968, 0x3263, 0x4484, \ + 0x8c, 0xa5, 0x2a, 0x35, 0xd0, 0x87, 0xa5, 0xa8) + +#define ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE + +/* Must increment this whenever you insert or delete fields within this channel +* struct. Also increment whenever you change the meaning of fields within this +* channel struct so as to break pre-existing software. Note that you can +* usually add fields to the END of the channel struct withOUT needing to +* increment this. */ +#define ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID 2 + +#define SPAR_DIAG_CHANNEL_OK_CLIENT(ch)\ + (spar_check_channel_client(ch,\ + spar_diag_channel_protocol_uuid,\ + "diag",\ + sizeof(struct spar_diag_channel_protocol),\ + ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID,\ + ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE)) + +#define SPAR_DIAG_CHANNEL_OK_SERVER(bytes)\ + (spar_check_channel_server(spar_diag_channel_protocol_uuid,\ + "diag",\ + sizeof(struct spar_diag_channel_protocol),\ + bytes)) + +#define MAX_MODULE_NAME_SIZE 128 /* Maximum length of module name... */ +#define MAX_ADDITIONAL_INFO_SIZE 256 /* Maximum length of any additional info + * accompanying event... 
*/ +#define MAX_SUBSYSTEMS 64 /* Maximum number of subsystems allowed in + * DiagChannel... */ +#define LOW_SUBSYSTEMS 32 /* Half of MAX_SUBSYSTEMS to allow 64-bit + * math */ +#define SUBSYSTEM_DEBUG 0 /* Standard subsystem for debug events */ +#define SUBSYSTEM_DEFAULT 1 /* Default subsystem for legacy calls to + * ReportEvent */ + +/* few useful subsystem mask values */ +#define SUBSYSTEM_MASK_DEBUG 0x01 /* Standard subsystem for debug + * events */ +#define SUBSYSTEM_MASK_DEFAULT 0x02 /* Default subsystem for legacy calls to + * ReportEvents */ + +/* Event parameter "Severity" is overloaded with Cause in byte 2 and Severity in + * byte 0, bytes 1 and 3 are reserved */ +#define SEVERITY_MASK 0x0FF /* mask out all but the Severity in byte 0 */ +#define CAUSE_MASK 0x0FF0000 /* mask out all but the cause in byte 2 */ +#define CAUSE_SHIFT_AMT 16 /* shift 2 bytes to place it in byte 2 */ + +/* SubsystemSeverityFilter */ +#define SEVERITY_FILTER_MASK 0x0F /* mask out the Cause half, SeverityFilter is + * in the lower nibble */ +#define CAUSE_FILTER_MASK 0xF0 /* mask out the Severity half, CauseFilter is in + * the upper nibble */ +#define CAUSE_FILTER_SHIFT_AMT 4 /* shift amount to place it in lower or upper + * nibble */ + +/* Copied from EFI's EFI_TIME struct in efidef.h. EFI headers are not allowed +* in some of the Supervisor areas, such as Monitor, so it has been "ported" here +* for use in diagnostic event timestamps... */ +struct diag_efi_time { + u16 year; /* 1998 - 20XX */ + u8 month; /* 1 - 12 */ + u8 day; /* 1 - 31 */ + u8 hour; /* 0 - 23 */ + u8 minute; /* 0 - 59 */ + u8 second; /* 0 - 59 */ + u8 pad1; + u32 nanosecond; /* 0 - 999, 999, 999 */ + s16 timezone; /* -1440 to 1440 or 2047 */ + u8 daylight; + u8 pad2; +}; + +enum spar_component_types { + ULTRA_COMPONENT_GUEST = 0, + ULTRA_COMPONENT_MONITOR = 0x01, + ULTRA_COMPONENT_CCM = 0x02, /* Common Control module */ + /* RESERVED 0x03 - 0x7 */ + + /* Ultravisor Components */ + ULTRA_COMPONENT_BOOT = 0x08, + ULTRA_COMPONENT_IDLE = 0x09, + ULTRA_COMPONENT_CONTROL = 0x0A, + ULTRA_COMPONENT_LOGGER = 0x0B, + ULTRA_COMPONENT_ACPI = 0X0C, + /* RESERVED 0x0D - 0x0F */ + + /* sPAR Components */ + ULTRA_COMPONENT_COMMAND = 0x10, + ULTRA_COMPONENT_IODRIVER = 0x11, + ULTRA_COMPONENT_CONSOLE = 0x12, + ULTRA_COMPONENT_OPERATIONS = 0x13, + ULTRA_COMPONENT_MANAGEMENT = 0x14, + ULTRA_COMPONENT_DIAG = 0x15, + ULTRA_COMPONENT_HWDIAG = 0x16, + ULTRA_COMPONENT_PSERVICES = 0x17, + ULTRA_COMPONENT_PDIAG = 0x18 + /* RESERVED 0x18 - 0x1F */ +}; + +/* Structure: diag_channel_event Purpose: Contains attributes that make up an + * event to be written to the DIAG_CHANNEL memory. Attributes: EventId: Id of + * the diagnostic event to write to memory. Severity: Severity of the event + * (Error, Info, etc). ModuleName: Module/file name where event originated. + * LineNumber: Line number in module name where event originated. Timestamp: + * Date/time when event was received by ReportEvent, and written to DiagChannel. + * Reserved: Padding to align structure on a 64-byte cache line boundary. + * AdditionalInfo: Array of characters for additional event info (may be + * empty). 
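+ *
+ * Informational sketch (not part of this structure): the severity parameter
+ * described above packs a cause in byte 2 and a severity in byte 0, using
+ * the masks defined earlier and enum values defined later in this file:
+ *
+ *   u32 sev = (DIAG_CAUSE_DEBUG << CAUSE_SHIFT_AMT) | DIAG_SEVERITY_INFO;
+ *   u32 severity_only = sev & SEVERITY_MASK;                 (byte 0)
+ *   u32 cause_only = (sev & CAUSE_MASK) >> CAUSE_SHIFT_AMT;  (byte 2)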
*/ +struct diag_channel_event { + u32 event_id; + u32 severity; + u8 module_name[MAX_MODULE_NAME_SIZE]; + u32 line_number; + struct diag_efi_time timestamp; /* Size = 16 bytes */ + u32 partition_number; /* Filled in by Diag Switch as pool blocks are + * filled */ + u16 vcpu_number; + u16 lcpu_number; + u8 component_type; /* ULTRA_COMPONENT_TYPES */ + u8 subsystem; + u16 reserved0; /* pad to u64 alignment */ + u32 block_no; /* filled in by DiagSwitch as pool blocks are + * filled */ + u32 block_no_high; + u32 event_no; /* filled in by DiagSwitch as pool blocks are + * filled */ + u32 event_no_high; + + /* The block_no and event_no fields are set only by DiagSwitch + * and referenced only by WinDiagDisplay formatting tool as + * additional diagnostic information. Other tools including + * WinDiagDisplay currently ignore these 'Reserved' bytes. */ + u8 reserved[8]; + u8 additional_info[MAX_ADDITIONAL_INFO_SIZE]; + + /* NOTE: Changes to diag_channel_event generally need to be reflected in + * existing copies * + * - for AppOS at + * GuestLinux/visordiag_early/supervisor_diagchannel.h * + * - for WinDiagDisplay at + * EFI/Ultra/Tools/WinDiagDisplay/WinDiagDisplay/diagstruct.h */ +}; + +/* Levels of severity for diagnostic events, in order from lowest severity to +* highest (i.e. fatal errors are the most severe, and should always be logged, +* but info events rarely need to be logged except during debugging). The values +* DIAG_SEVERITY_ENUM_BEGIN and DIAG_SEVERITY_ENUM_END are not valid severity +* values. They exist merely to delineate the list, so that future additions +* won't require changes to the driver (i.e. when checking for out-of-range +* severities in SetSeverity). The values DIAG_SEVERITY_OVERRIDE and +* DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events but +* they are valid for controlling the amount of event data. This enum is also +* defined in DotNet\sParFramework\ControlFramework\ControlFramework.cs. If a +* change is made to this enum, it should also be reflected in that file. */ +enum diag_severity { + DIAG_SEVERITY_ENUM_BEGIN = 0, + DIAG_SEVERITY_OVERRIDE = DIAG_SEVERITY_ENUM_BEGIN, + DIAG_SEVERITY_VERBOSE = DIAG_SEVERITY_OVERRIDE, /* 0 */ + DIAG_SEVERITY_INFO = DIAG_SEVERITY_VERBOSE + 1, /* 1 */ + DIAG_SEVERITY_WARNING = DIAG_SEVERITY_INFO + 1, /* 2 */ + DIAG_SEVERITY_ERR = DIAG_SEVERITY_WARNING + 1, /* 3 */ + DIAG_SEVERITY_PRINT = DIAG_SEVERITY_ERR + 1, /* 4 */ + DIAG_SEVERITY_SHUTOFF = DIAG_SEVERITY_PRINT + 1, /* 5 */ + DIAG_SEVERITY_ENUM_END = DIAG_SEVERITY_SHUTOFF, /* 5 */ + DIAG_SEVERITY_NONFATAL_ERR = DIAG_SEVERITY_ERR, + DIAG_SEVERITY_FATAL_ERR = DIAG_SEVERITY_PRINT +}; + +/* Event Cause enums +* +* Levels of cause for diagnostic events, in order from least to greatest cause: +* Internal errors are most urgent since ideally they should never exist. +* Invalid requests are preventable by avoiding invalid inputs. +* Operations errors depend on environmental factors which may impact which +* requests are possible. +* Manifest provides an intermediate value to capture firmware and configuration +* version information. +* Trace provides supplemental debug information in release firmware. +* Unknown Log captures unclassified LogEvent calls. +* Debug is the least urgent since it provides supplemental debug information only +* in debug firmware. +* Unknown Debug captures unclassified DebugEvent calls. +* This enum is also defined in +* DotNet\sParFramework\ControlFramework\ControlFramework.cs.
+* If a change is made to this enum, they should also be reflected in that +* file. */ + +/* A cause value "DIAG_CAUSE_FILE_XFER" together with a severity value of +* "DIAG_SEVERITY_PRINT" (=4), is used for transferring text or binary file to +* the Diag partition. This cause-severity combination will be used by Logger +* DiagSwitch to segregate events into block types. The files are transferred in +* 256 byte chunks maximum, in the AdditionalInfo field of the diag_channel_event +* structure. In the file transfer mode, some event fields will have different +* meaning: EventId specifies the file offset, severity specifies the block type, +* ModuleName specifies the filename, LineNumber specifies the number of valid +* data bytes in an event and AdditionalInfo contains up to 256 bytes of data. */ + +/* The Diag DiagWriter appends event blocks to events.raw as today, and for data + * blocks uses diag_channel_event + * PartitionNumber to extract and append 'AdditionalInfo' to filename (specified + * by ModuleName). */ + +/* The Dell PDiag uses this new mechanism to stash DSET .zip onto the + * 'diagnostic' virtual disk. */ +enum diag_cause { + DIAG_CAUSE_UNKNOWN = 0, + DIAG_CAUSE_UNKNOWN_DEBUG = DIAG_CAUSE_UNKNOWN + 1, /* 1 */ + DIAG_CAUSE_DEBUG = DIAG_CAUSE_UNKNOWN_DEBUG + 1, /* 2 */ + DIAG_CAUSE_UNKNOWN_LOG = DIAG_CAUSE_DEBUG + 1, /* 3 */ + DIAG_CAUSE_TRACE = DIAG_CAUSE_UNKNOWN_LOG + 1, /* 4 */ + DIAG_CAUSE_MANIFEST = DIAG_CAUSE_TRACE + 1, /* 5 */ + DIAG_CAUSE_OPERATIONS_ERROR = DIAG_CAUSE_MANIFEST + 1, /* 6 */ + DIAG_CAUSE_INVALID_REQUEST = DIAG_CAUSE_OPERATIONS_ERROR + 1, /* 7 */ + DIAG_CAUSE_INTERNAL_ERROR = DIAG_CAUSE_INVALID_REQUEST + 1, /* 8 */ + DIAG_CAUSE_FILE_XFER = DIAG_CAUSE_INTERNAL_ERROR + 1, /* 9 */ + DIAG_CAUSE_ENUM_END = DIAG_CAUSE_FILE_XFER /* 9 */ +}; + +/* Event Cause category defined into the byte 2 of Severity */ +#define CAUSE_DEBUG (DIAG_CAUSE_DEBUG << CAUSE_SHIFT_AMT) +#define CAUSE_TRACE (DIAG_CAUSE_TRACE << CAUSE_SHIFT_AMT) +#define CAUSE_MANIFEST (DIAG_CAUSE_MANIFEST << CAUSE_SHIFT_AMT) +#define CAUSE_OPERATIONS_ERROR (DIAG_CAUSE_OPERATIONS_ERROR << CAUSE_SHIFT_AMT) +#define CAUSE_INVALID_REQUEST (DIAG_CAUSE_INVALID_REQUEST << CAUSE_SHIFT_AMT) +#define CAUSE_INTERNAL_ERROR (DIAG_CAUSE_INTERNAL_ERROR << CAUSE_SHIFT_AMT) +#define CAUSE_FILE_XFER (DIAG_CAUSE_FILE_XFER << CAUSE_SHIFT_AMT) +#define CAUSE_ENUM_END CAUSE_FILE_XFER + +/* Combine Cause and Severity categories into one */ +#define CAUSE_DEBUG_SEVERITY_VERBOSE \ + (CAUSE_DEBUG | DIAG_SEVERITY_VERBOSE) +#define CAUSE_TRACE_SEVERITY_VERBOSE \ + (CAUSE_TRACE | DIAG_SEVERITY_VERBOSE) +#define CAUSE_MANIFEST_SEVERITY_VERBOSE\ + (CAUSE_MANIFEST | DIAG_SEVERITY_VERBOSE) +#define CAUSE_OPERATIONS_SEVERITY_VERBOSE \ + (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_VERBOSE) +#define CAUSE_INVALID_SEVERITY_VERBOSE \ + (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_VERBOSE) +#define CAUSE_INTERNAL_SEVERITY_VERBOSE \ + (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_VERBOSE) + +#define CAUSE_DEBUG_SEVERITY_INFO \ + (CAUSE_DEBUG | DIAG_SEVERITY_INFO) +#define CAUSE_TRACE_SEVERITY_INFO \ + (CAUSE_TRACE | DIAG_SEVERITY_INFO) +#define CAUSE_MANIFEST_SEVERITY_INFO \ + (CAUSE_MANIFEST | DIAG_SEVERITY_INFO) +#define CAUSE_OPERATIONS_SEVERITY_INFO \ + (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_INFO) +#define CAUSE_INVALID_SEVERITY_INFO \ + (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_INFO) +#define CAUSE_INTERNAL_SEVERITY_INFO \ + (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_INFO) + +#define CAUSE_DEBUG_SEVERITY_WARN \ + (CAUSE_DEBUG | DIAG_SEVERITY_WARNING) +#define 
CAUSE_TRACE_SEVERITY_WARN \ + (CAUSE_TRACE | DIAG_SEVERITY_WARNING) +#define CAUSE_MANIFEST_SEVERITY_WARN \ + (CAUSE_MANIFEST | DIAG_SEVERITY_WARNING) +#define CAUSE_OPERATIONS_SEVERITY_WARN \ + (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_WARNING) +#define CAUSE_INVALID_SEVERITY_WARN \ + (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_WARNING) +#define CAUSE_INTERNAL_SEVERITY_WARN \ + (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_WARNING) + +#define CAUSE_DEBUG_SEVERITY_ERR \ + (CAUSE_DEBUG | DIAG_SEVERITY_ERR) +#define CAUSE_TRACE_SEVERITY_ERR \ + (CAUSE_TRACE | DIAG_SEVERITY_ERR) +#define CAUSE_MANIFEST_SEVERITY_ERR \ + (CAUSE_MANIFEST | DIAG_SEVERITY_ERR) +#define CAUSE_OPERATIONS_SEVERITY_ERR \ + (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_ERR) +#define CAUSE_INVALID_SEVERITY_ERR \ + (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_ERR) +#define CAUSE_INTERNAL_SEVERITY_ERR \ + (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_ERR) + +#define CAUSE_DEBUG_SEVERITY_PRINT \ + (CAUSE_DEBUG | DIAG_SEVERITY_PRINT) +#define CAUSE_TRACE_SEVERITY_PRINT \ + (CAUSE_TRACE | DIAG_SEVERITY_PRINT) +#define CAUSE_MANIFEST_SEVERITY_PRINT \ + (CAUSE_MANIFEST | DIAG_SEVERITY_PRINT) +#define CAUSE_OPERATIONS_SEVERITY_PRINT \ + (CAUSE_OPERATIONS_ERROR | DIAG_SEVERITY_PRINT) +#define CAUSE_INVALID_SEVERITY_PRINT \ + (CAUSE_INVALID_REQUEST | DIAG_SEVERITY_PRINT) +#define CAUSE_INTERNAL_SEVERITY_PRINT \ + (CAUSE_INTERNAL_ERROR | DIAG_SEVERITY_PRINT) +#define CAUSE_FILE_XFER_SEVERITY_PRINT \ + (CAUSE_FILE_XFER | DIAG_SEVERITY_PRINT) + +/* Structure: diag_channel_protocol_header + * + * Purpose: Contains attributes that make up the header specific to the + * DIAG_CHANNEL area. + * + * Attributes: + * + * DiagLock: Diag Channel spinlock. + * + *IsChannelInitialized: 1 iff SignalInit was called for this channel; otherwise + * 0, and assume the channel is not ready for use yet. + * + * Reserved: Padding to align the fields in this structure. + * + *SubsystemSeverityFilter: Level of severity on a subsystem basis that controls + * whether events are logged. Any event's severity for a + * particular subsystem below this level will be discarded. + */ +struct diag_channel_protocol_header { + u32 diag_lock; + u8 channel_initialized; + u8 reserved[3]; + u8 subsystem_severity_filter[64]; +}; + +/* The Diagram for the Diagnostic Channel: */ +/* ----------------------- */ +/* | Channel Header | Defined by ULTRA_CHANNEL_PROTOCOL */ +/* ----------------------- */ +/* | Signal Queue Header | Defined by SIGNAL_QUEUE_HEADER */ +/* ----------------------- */ +/* | DiagChannel Header | Defined by diag_channel_protocol_header */ +/* ----------------------- */ +/* | Channel Event Info | Defined by diag_channel_event*MAX_EVENTS */ +/* ----------------------- */ +/* | Reserved | Reserved (pad out to 4MB) */ +/* ----------------------- */ + +/* Offsets/sizes for diagnostic channel attributes... 
*/ +#define DIAG_CH_QUEUE_HEADER_OFFSET (sizeof(struct channel_header)) +#define DIAG_CH_QUEUE_HEADER_SIZE (sizeof(struct signal_queue_header)) +#define DIAG_CH_PROTOCOL_HEADER_OFFSET \ + (DIAG_CH_QUEUE_HEADER_OFFSET + DIAG_CH_QUEUE_HEADER_SIZE) +#define DIAG_CH_PROTOCOL_HEADER_SIZE \ + (sizeof(struct diag_channel_protocol_header)) +#define DIAG_CH_EVENT_OFFSET \ + (DIAG_CH_PROTOCOL_HEADER_OFFSET + DIAG_CH_PROTOCOL_HEADER_SIZE) +#define DIAG_CH_SIZE (4096 * 1024) + +/* For Control and Idle Partitions with larger (8 MB) diagnostic (root) + * channels */ +#define DIAG_CH_LRG_SIZE (2 * DIAG_CH_SIZE) /* 8 MB */ + +/* + * Structure: spar_diag_channel_protocol + * + * Purpose: Contains attributes that make up the DIAG_CHANNEL memory. + * + * Attributes: + * + * CommonChannelHeader: Header info common to all channels. + * + * QueueHeader: Queue header common to all channels - used to determine where to + * store event. + * + * DiagChannelHeader: Diagnostic channel header info (see + * diag_channel_protocol_header comments). + * + * Events: Area where diagnostic events (up to MAX_EVENTS) are written. + * + *Reserved: Reserved area to allow for correct channel size padding. +*/ +struct spar_diag_channel_protocol { + struct channel_header common_channel_header; + struct signal_queue_header queue_header; + struct diag_channel_protocol_header diag_channel_header; + struct diag_channel_event events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) / + sizeof(struct diag_channel_event)]; +}; + +#endif diff --git a/kernel/drivers/staging/unisys/common-spar/include/channels/iochannel.h b/kernel/drivers/staging/unisys/common-spar/include/channels/iochannel.h new file mode 100644 index 000000000..3bd7579e1 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/channels/iochannel.h @@ -0,0 +1,784 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION */ +/* All rights reserved. */ +#ifndef __IOCHANNEL_H__ +#define __IOCHANNEL_H__ + +/* +* Everything needed for IOPart-GuestPart communication is defined in +* this file. Note: Everything is OS-independent because this file is +* used by Windows, Linux and possibly EFI drivers. */ + +/* +* Communication flow between the IOPart and GuestPart uses the channel header's +* channel state. The following states are currently being used: +* UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED +* +* Additional states will be used later. No locking is needed to switch between +* states due to the following rules: +* +* 1. IOPart is the only partition allowed to change from UNINIT +* 2. IOPart is the only partition allowed to change from +* CHANNEL_ATTACHING +* 3.
GuestPart is the only partition allowed to change from +* CHANNEL_ATTACHED +* +* The state changes are the following: IOPart sees the channel is in UNINIT, +* UNINIT -> CHANNEL_ATTACHING (performed only by IOPart) +* CHANNEL_ATTACHING -> CHANNEL_ATTACHED (performed only by IOPart) +* CHANNEL_ATTACHED -> CHANNEL_OPENED (performed only by GuestPart) +*/ + +#include + +#include "vmcallinterface.h" + +#define _ULTRA_CONTROLVM_CHANNEL_INLINE_ +#include +#include "controlvmchannel.h" +#include "vbuschannel.h" +#undef _ULTRA_CONTROLVM_CHANNEL_INLINE_ +#include "channel.h" + +/* + * CHANNEL Guids + */ + +#include "channel_guid.h" + +#define ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE +#define ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE +#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \ + ULTRA_CHANNEL_PROTOCOL_SIGNATURE + +/* Must increment these whenever you insert or delete fields within this channel +* struct. Also increment whenever you change the meaning of fields within this +* channel struct so as to break pre-existing software. Note that you can +* usually add fields to the END of the channel struct withOUT needing to +* increment this. */ +#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2 +#define ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID 2 +#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID 1 + +#define SPAR_VHBA_CHANNEL_OK_CLIENT(ch) \ + (spar_check_channel_client(ch, spar_vhba_channel_protocol_uuid, \ + "vhba", MIN_IO_CHANNEL_SIZE, \ + ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \ + ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE)) + +#define SPAR_VNIC_CHANNEL_OK_CLIENT(ch) \ + (spar_check_channel_client(ch, spar_vnic_channel_protocol_uuid, \ + "vnic", MIN_IO_CHANNEL_SIZE, \ + ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \ + ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE)) + +/* +* Everything necessary to handle SCSI & NIC traffic between Guest Partition and +* IO Partition is defined below. */ + +/* +* Defines and enums. +*/ + +#define MINNUM(a, b) (((a) < (b)) ? (a) : (b)) +#define MAXNUM(a, b) (((a) > (b)) ? (a) : (b)) + +/* these define the two queues per data channel between iopart and + * ioguestparts */ +#define IOCHAN_TO_IOPART 0 /* used by ioguestpart to 'insert' signals to + * iopart */ +#define IOCHAN_FROM_GUESTPART 0 /* used by iopart to 'remove' signals from + * ioguestpart - same queue as previous queue */ + +#define IOCHAN_TO_GUESTPART 1 /* used by iopart to 'insert' signals to + * ioguestpart */ +#define IOCHAN_FROM_IOPART 1 /* used by ioguestpart to 'remove' signals from + * iopart - same queue as previous queue */ + +/* these define the two queues per control channel between controlpart and "its" + * guests, which includes the iopart */ +#define CTRLCHAN_TO_CTRLGUESTPART 0 /* used by ctrlguestpart to 'insert' signals + * to ctrlpart */ +#define CTLRCHAN_FROM_CTRLPART 0 /* used by ctrlpart to 'remove' signals from + * ctrlguestpart - same queue as previous + * queue */ + +#define CTRLCHAN_TO_CTRLPART 1 /* used by ctrlpart to 'insert' signals to + * ctrlguestpart */ +#define CTRLCHAN_FROM_CTRLGUESTPART 1 /* used by ctrlguestpart to 'remove' + * signals from ctrlpart - same queue as + * previous queue */ + +/* these define the Event & Ack queues per control channel. Events are generated +* by CTRLGUESTPART and sent to CTRLPART; Acks are generated by CTRLPART and sent +* to CTRLGUESTPART.
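+*
+* Illustrative note (the helper named below is an assumption, not something
+* defined by this header): each '_TO_' index and its matching '_FROM_' index
+* refer to the same queue viewed from the two ends. For example, on a data
+* channel the guest inserts a request that the IO partition then removes:
+*
+*   visor_signal_insert(chan, IOCHAN_TO_IOPART, &cmdrsp);      (guest side)
+*   visor_signal_remove(chan, IOCHAN_FROM_GUESTPART, &cmdrsp); (IOPart side)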
*/ +#define CTRLCHAN_EVENT_TO_CTRLPART 2 /* used by ctrlguestpart to 'insert' Events + * to ctrlpart */ +#define CTRLCHAN_EVENT_FROM_CTRLGUESTPART 2 /* used by ctrlpart to 'remove' + * Events from ctrlguestpart */ + +#define CTRLCHAN_ACK_TO_CTRLGUESTPART 3 /* used by ctrlpart to 'insert' Acks to + * ctrlguestpart */ +#define CTRLCHAN_ACK_FROM_CTRLPART 3 /* used by ctrlguestpart to 'remove' Events + * from ctrlpart */ + +/* size of cdb - i.e., scsi cmnd */ +#define MAX_CMND_SIZE 16 + +#define MAX_SENSE_SIZE 64 + +#define MAX_PHYS_INFO 64 + +/* Because GuestToGuestCopy is limited to 4KiB segments, and we have limited the +* Emulex Driver to 256 scatter list segments via the lpfc_sg_seg_cnt parameter +* to 256, the maximum I/O size is limited to 256 * 4 KiB = 1 MB */ +#define MAX_IO_SIZE (1024*1024) /* 1 MB */ + +/* NOTE 1: lpfc defines its support for segments in +* #define LPFC_SG_SEG_CNT 64 +* +* NOTE 2: In Linux, frags array in skb is currently allocated to be +* MAX_SKB_FRAGS size, which is 18 which is smaller than MAX_PHYS_INFO for +* now. */ + +#ifndef MAX_SERIAL_NUM +#define MAX_SERIAL_NUM 32 +#endif /* MAX_SERIAL_NUM */ + +#define MAX_SCSI_BUSES 1 +#define MAX_SCSI_TARGETS 8 +#define MAX_SCSI_LUNS 16 +#define MAX_SCSI_FROM_HOST 0xFFFFFFFF /* Indicator to use Physical HBA + * SCSI Host value */ + +/* various types of network packets that can be sent in cmdrsp */ +enum net_types { + NET_RCV_POST = 0, /* submit buffer to hold receiving + * incoming packet */ + /* virtnic -> uisnic */ + NET_RCV, /* incoming packet received */ + /* uisnic -> virtpci */ + NET_XMIT, /* for outgoing net packets */ + /* virtnic -> uisnic */ + NET_XMIT_DONE, /* outgoing packet xmitted */ + /* uisnic -> virtpci */ + NET_RCV_ENBDIS, /* enable/disable packet reception */ + /* virtnic -> uisnic */ + NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet + * reception */ + /* uisnic -> virtnic */ + NET_RCV_PROMISC, /* enable/disable promiscuous mode */ + /* virtnic -> uisnic */ + NET_CONNECT_STATUS, /* indicate the loss or restoration of a network + * connection */ + /* uisnic -> virtnic */ + NET_MACADDR, /* indicates the client has requested to update + * its MAC addr */ + NET_MACADDR_ACK, /* MAC address */ + +}; + +#define ETH_HEADER_SIZE 14 /* size of ethernet header */ + +#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */ +#define ETH_MIN_PACKET_SIZE (ETH_HEADER_SIZE + ETH_MIN_DATA_SIZE) + +#define ETH_DEF_DATA_SIZE 1500 /* default data size */ +#define ETH_DEF_PACKET_SIZE (ETH_HEADER_SIZE + ETH_DEF_DATA_SIZE) + +#define ETH_MAX_MTU 16384 /* maximum data size */ + +#ifndef MAX_MACADDR_LEN +#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */ +#endif /* MAX_MACADDR_LEN */ + +#define ETH_IS_LOCALLY_ADMINISTERED(address) \ + (((u8 *)(address))[0] & ((u8)0x02)) +#define NIC_VENDOR_ID 0x0008000B + +/* various types of scsi task mgmt commands */ +enum task_mgmt_types { + TASK_MGMT_ABORT_TASK = 1, + TASK_MGMT_BUS_RESET, + TASK_MGMT_LUN_RESET, + TASK_MGMT_TARGET_RESET, +}; + +/* various types of vdisk mgmt commands */ +enum vdisk_mgmt_types { + VDISK_MGMT_ACQUIRE = 1, + VDISK_MGMT_RELEASE, +}; + +/* this is used in the vdest field */ +#define VDEST_ALL 0xFFFF + +#define MIN_NUMSIGNALS 64 +#define MAX_NUMSIGNALS 4096 + +/* MAX_NET_RCV_BUF specifies the number of rcv buffers that are created by each +* guest's virtnic and posted to uisnic. Uisnic, for each channel, keeps the rcv +* buffers posted and uses them to receive data on behalf of the guest's virtnic. 
+* NOTE: the num_rcv_bufs is configurable for each VNIC. So the following is +* simply an upperlimit on what each VNIC can provide. Setting it to half of the +* NUMSIGNALS to prevent queue full deadlocks */ +#define MAX_NET_RCV_BUFS (MIN_NUMSIGNALS / 2) + +/* + * structs with pragma pack */ + +/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */ +/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */ + +#pragma pack(push, 1) + +struct guest_phys_info { + u64 address; + u64 length; +}; + +#define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info)) + +struct uisscsi_dest { + u32 channel; /* channel == bus number */ + u32 id; /* id == target number */ + u32 lun; /* lun == logical unit number */ +}; + +struct vhba_wwnn { + u32 wwnn1; + u32 wwnn2; +}; + +/* WARNING: Values stired in this structure must contain maximum counts (not + * maximum values). */ +struct vhba_config_max { /* 20 bytes */ + u32 max_channel; /* maximum channel for devices attached to this + * bus */ + u32 max_id; /* maximum SCSI ID for devices attached to this + * bus */ + u32 max_lun; /* maximum SCSI LUN for devices attached to this + * bus */ + u32 cmd_per_lun; /* maximum number of outstanding commands per + * lun that are allowed at one time */ + u32 max_io_size; /* maximum io size for devices attached to this + * bus */ + /* max io size is often determined by the resource of the hba. e.g */ + /* max scatter gather list length * page size / sector size */ +}; + +struct uiscmdrsp_scsi { + void *scsicmd; /* the handle to the cmd that was received - + * send it back as is in the rsp packet. */ + u8 cmnd[MAX_CMND_SIZE]; /* the cdb for the command */ + u32 bufflen; /* length of data to be transferred out or in */ + u16 guest_phys_entries; /* Number of entries in scatter-gather (sg) + * list */ + struct guest_phys_info gpi_list[MAX_PHYS_INFO]; /* physical address + * information for each + * fragment */ + enum dma_data_direction data_dir; /* direction of the data, if any */ + struct uisscsi_dest vdest; /* identifies the virtual hba, id, + * channel, lun to which cmd was sent */ + + /* the following fields are needed to queue the rsp back to cmd + * originator */ + int linuxstat; /* the original Linux status - for use by linux + * vdisk code */ + u8 scsistat; /* the scsi status */ + u8 addlstat; /* non-scsi status - covers cases like timeout + * needed by windows guests */ +#define ADDL_RESET 1 +#define ADDL_TIMEOUT 2 +#define ADDL_INTERNAL_ERROR 3 +#define ADDL_SEL_TIMEOUT 4 +#define ADDL_CMD_TIMEOUT 5 +#define ADDL_BAD_TARGET 6 +#define ADDL_RETRY 7 + + /* the following fields are need to determine the result of command */ + u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */ + /* it holds the sense_data struct; */ + /* see that struct for details. */ + void *vdisk; /* contains pointer to the vdisk so that we can clean up + * when the IO completes. */ + int no_disk_result; /* used to return no disk inquiry result */ + /* when no_disk_result is set to 1, */ + /* scsi.scsistat is SAM_STAT_GOOD */ + /* scsi.addlstat is 0 */ + /* scsi.linuxstat is SAM_STAT_GOOD */ + /* That is, there is NO error. */ +}; + +/* +* Defines to support sending correct inquiry result when no disk is +* configured. */ + +/* From SCSI SPC2 - + * + * If the target is not capable of supporting a device on this logical unit, the + * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b + * and PERIPHERAL DEVICE TYPE set to 1Fh). 
+ * + *The device server is capable of supporting the specified peripheral device + *type on this logical unit. However, the physical device is not currently + *connected to this logical unit. + */ + +#define DEV_NOT_PRESENT 0x7f /* old name - compatibility */ +#define DEV_NOT_CAPABLE 0x7f /* peripheral qualifier of 0x3 */ + /* peripheral type of 0x1f */ + /* specifies no device but target present */ + +#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */ + /* peripheral type of 0 - disk */ + /* specifies device capable, but not present */ + +#define DEV_PROC_CAPABLE_NOT_PRESENT 0x23 /* peripheral qualifier of 0x1 */ + /* peripheral type of 3 - processor */ + /* specifies device capable, but not present */ + +#define DEV_HISUPPORT 0x10 /* HiSup = 1; shows support for report luns */ + /* must be returned for lun 0. */ + +/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length +* in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product +* & revision. Yikes! So let us always send back 36 bytes, the minimum for +* inquiry result. */ +#define NO_DISK_INQUIRY_RESULT_LEN 36 + +#define MIN_INQUIRY_RESULT_LEN 5 /* we need at least 5 bytes minimum for inquiry + * result */ + +/* SCSI device version for no disk inquiry result */ +#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */ + +/* Windows and Linux want different things for a non-existent lun. So, we'll let + * caller pass in the peripheral qualifier and type. + * NOTE:[4] SCSI returns (n-4); so we return length-1-4 or length-5. */ + +#define SET_NO_DISK_INQUIRY_RESULT(buf, len, lun, lun0notpresent, notpresent) \ + do { \ + memset(buf, 0, \ + MINNUM(len, \ + (unsigned int)NO_DISK_INQUIRY_RESULT_LEN)); \ + buf[2] = (u8)SCSI_SPC2_VER; \ + if (lun == 0) { \ + buf[0] = (u8)lun0notpresent; \ + buf[3] = (u8)DEV_HISUPPORT; \ + } else \ + buf[0] = (u8)notpresent; \ + buf[4] = (u8)( \ + MINNUM(len, \ + (unsigned int)NO_DISK_INQUIRY_RESULT_LEN) - 5);\ + if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \ + buf[8] = 'D'; \ + buf[9] = 'E'; \ + buf[10] = 'L'; \ + buf[11] = 'L'; \ + buf[16] = 'P'; \ + buf[17] = 'S'; \ + buf[18] = 'E'; \ + buf[19] = 'U'; \ + buf[20] = 'D'; \ + buf[21] = 'O'; \ + buf[22] = ' '; \ + buf[23] = 'D'; \ + buf[24] = 'E'; \ + buf[25] = 'V'; \ + buf[26] = 'I'; \ + buf[27] = 'C'; \ + buf[28] = 'E'; \ + buf[30] = ' '; \ + buf[31] = '.'; \ + } \ + } while (0) + +/* +* Struct & Defines to support sense information. +*/ + +/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is +* initialized in exactly the manner that is recommended in Windows (hence the +* odd values). +* When set, these fields will have the following values: +* ErrorCode = 0x70 indicates current error +* Valid = 1 indicates sense info is valid +* SenseKey contains sense key as defined by SCSI specs. +* AdditionalSenseCode contains sense key as defined by SCSI specs. +* AdditionalSenseCodeQualifier contains qualifier to sense code as defined by +* scsi docs. +* AdditionalSenseLength contains will be sizeof(sense_data)-8=10. 
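+*
+* Minimal illustrative sketch (not defined by this header) of filling the
+* struct as recommended above for a NOT READY check condition, where 0x3a is
+* the standard "medium not present" additional sense code:
+*
+*   struct sense_data sd = {0};
+*   sd.errorcode = 0x70;                                  (current error)
+*   sd.valid = 1;
+*   sd.sense_key = 0x02;                                  (NOT READY)
+*   sd.additional_sense_code = 0x3a;
+*   sd.additional_sense_length = sizeof(struct sense_data) - 8;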
+*/ +struct sense_data { + u8 errorcode:7; + u8 valid:1; + u8 segment_number; + u8 sense_key:4; + u8 reserved:1; + u8 incorrect_length:1; + u8 end_of_media:1; + u8 file_mark:1; + u8 information[4]; + u8 additional_sense_length; + u8 command_specific_information[4]; + u8 additional_sense_code; + u8 additional_sense_code_qualifier; + u8 fru_code; + u8 sense_key_specific[3]; +}; + +/* some SCSI ADSENSE codes */ +#ifndef SCSI_ADSENSE_LUN_NOT_READY +#define SCSI_ADSENSE_LUN_NOT_READY 0x04 +#endif /* */ +#ifndef SCSI_ADSENSE_ILLEGAL_COMMAND +#define SCSI_ADSENSE_ILLEGAL_COMMAND 0x20 +#endif /* */ +#ifndef SCSI_ADSENSE_ILLEGAL_BLOCK +#endif /* */ +#ifndef SCSI_ADSENSE_ILLEGAL_BLOCK +#define SCSI_ADSENSE_ILLEGAL_BLOCK 0x21 +#endif /* */ +#ifndef SCSI_ADSENSE_INVALID_CDB +#define SCSI_ADSENSE_INVALID_CDB 0x24 +#endif /* */ +#ifndef SCSI_ADSENSE_INVALID_LUN +#define SCSI_ADSENSE_INVALID_LUN 0x25 +#endif /* */ +#ifndef SCSI_ADWRITE_PROTECT +#define SCSI_ADWRITE_PROTECT 0x27 +#endif /* */ +#ifndef SCSI_ADSENSE_MEDIUM_CHANGED +#define SCSI_ADSENSE_MEDIUM_CHANGED 0x28 +#endif /* */ +#ifndef SCSI_ADSENSE_BUS_RESET +#define SCSI_ADSENSE_BUS_RESET 0x29 +#endif /* */ +#ifndef SCSI_ADSENSE_NO_MEDIA_IN_DEVICE +#define SCSI_ADSENSE_NO_MEDIA_IN_DEVICE 0x3a +#endif /* */ + +struct net_pkt_xmt { + int len; /* full length of data in the packet */ + int num_frags; /* number of fragments in frags containing data */ + struct phys_info frags[MAX_PHYS_INFO]; /* physical page information for + * each fragment */ + char ethhdr[ETH_HEADER_SIZE]; /* the ethernet header */ + struct { + /* these are needed for csum at uisnic end */ + u8 valid; /* 1 = rest of this struct is valid - else + * ignore */ + u8 hrawoffv; /* 1 = hwrafoff is valid */ + u8 nhrawoffv; /* 1 = nhwrafoff is valid */ + u16 protocol; /* specifies packet protocol */ + u32 csum; /* value used to set skb->csum at IOPart */ + u32 hrawoff; /* value used to set skb->h.raw at IOPart */ + /* hrawoff points to the start of the TRANSPORT LAYER HEADER */ + u32 nhrawoff; /* value used to set skb->nh.raw at IOPart */ + /* nhrawoff points to the start of the NETWORK LAYER HEADER */ + } lincsum; + + /* **** NOTE **** + * The full packet is described in frags but the ethernet header is + * separately kept in ethhdr so that uisnic doesn't have "MAP" the + * guest memory to get to the header. uisnic needs ethhdr to + * determine how to route the packet. + */ +}; + +struct net_pkt_xmtdone { + u32 xmt_done_result; /* result of NET_XMIT */ +#define XMIT_SUCCESS 0 +#define XMIT_FAILED 1 +}; + +/* RCVPOST_BUF_SIZe must be at most page_size(4096) - cache_line_size (64) The +* reason is because dev_skb_alloc which is used to generate RCV_POST skbs in +* virtnic requires that there is "overhead" in the buffer, and pads 16 bytes. I +* prefer to use 1 full cache line size for "overhead" so that transfers are +* better. IOVM requires that a buffer be represented by 1 phys_info structure +* which can only cover page_size. */ +#define RCVPOST_BUF_SIZE 4032 +#define MAX_NET_RCV_CHAIN \ + ((ETH_MAX_MTU+ETH_HEADER_SIZE + RCVPOST_BUF_SIZE-1) / RCVPOST_BUF_SIZE) + +struct net_pkt_rcvpost { + /* rcv buf size must be large enough to include ethernet data len + + * ethernet header len - we are choosing 2K because it is guaranteed + * to be describable */ + struct phys_info frag; /* physical page information for the + * single fragment 2K rcv buf */ + u64 unique_num; /* This is used to make sure that + * receive posts are returned to */ + /* the Adapter which sent them origonally. 
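+ *
+ * Worked example (informational only): with ETH_MAX_MTU = 16384,
+ * ETH_HEADER_SIZE = 14 and RCVPOST_BUF_SIZE = 4032, MAX_NET_RCV_CHAIN
+ * evaluates to (16384 + 14 + 4031) / 4032 = 5, i.e. a maximum-size frame is
+ * spread across at most 5 chained receive buffers.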
*/ +}; + +struct net_pkt_rcv { + /* the number of receive buffers that can be chained */ + /* is based on max mtu and size of each rcv buf */ + u32 rcv_done_len; /* length of received data */ + u8 numrcvbufs; /* number of receive buffers that contain the */ + /* incoming data; guest end MUST chain these together. */ + void *rcvbuf[MAX_NET_RCV_CHAIN]; /* the list of receive buffers + * that must be chained; */ + /* each entry is a receive buffer provided by NET_RCV_POST. */ + /* NOTE: first rcvbuf in the chain will also be provided in net.buf. */ + u64 unique_num; + u32 rcvs_dropped_delta; +}; + +struct net_pkt_enbdis { + void *context; + u16 enable; /* 1 = enable, 0 = disable */ +}; + +struct net_pkt_macaddr { + void *context; + u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */ +}; + +/* cmd rsp packet used for VNIC network traffic */ +struct uiscmdrsp_net { + enum net_types type; + void *buf; + union { + struct net_pkt_xmt xmt; /* used for NET_XMIT */ + struct net_pkt_xmtdone xmtdone; /* used for NET_XMIT_DONE */ + struct net_pkt_rcvpost rcvpost; /* used for NET_RCV_POST */ + struct net_pkt_rcv rcv; /* used for NET_RCV */ + struct net_pkt_enbdis enbdis; /* used for NET_RCV_ENBDIS, */ + /* NET_RCV_ENBDIS_ACK, */ + /* NET_RCV_PROMSIC, */ + /* and NET_CONNECT_STATUS */ + struct net_pkt_macaddr macaddr; + }; +}; + +struct uiscmdrsp_scsitaskmgmt { + enum task_mgmt_types tasktype; + + /* the type of task */ + struct uisscsi_dest vdest; + + /* the vdisk for which this task mgmt is generated */ + void *scsicmd; + + /* This is some handle that the guest has saved off for its own use. + * Its value is preserved by iopart & returned as is in the task mgmt + * rsp. */ + void *notify; + + /* For linux guests, this is a pointer to wait_queue_head that a + * thread is waiting on to see if the taskmgmt command has completed. + * For windows guests, this is a pointer to a location that a waiting + * thread is testing to see if the taskmgmt command has completed. + * When the rsp is received by guest, the thread receiving the + * response uses this to notify the thread waiting for taskmgmt + * command completion. Its value is preserved by iopart & returned + * as is in the task mgmt rsp. */ + void *notifyresult; + + /* this is a handle to location in guest where the result of the + * taskmgmt command (result field) is to saved off when the response + * is handled. Its value is preserved by iopart & returned as is in + * the task mgmt rsp. */ + char result; + + /* result of taskmgmt command - set by IOPart - values are: */ +#define TASK_MGMT_FAILED 0 +#define TASK_MGMT_SUCCESS 1 +}; + +/* The following is used by uissd to send disk add/remove notifications to + * Guest */ +/* Note that the vHba pointer is not used by the Client/Guest side. */ +struct uiscmdrsp_disknotify { + u8 add; /* 0-remove, 1-add */ + void *v_hba; /* Pointer to vhba_info for channel info to + * route msg */ + u32 channel, id, lun; /* SCSI Path of Disk to added or removed */ +}; + +/* The following is used by virthba/vSCSI to send the Acquire/Release commands +* to the IOVM. */ +struct uiscmdrsp_vdiskmgmt { + enum vdisk_mgmt_types vdisktype; + + /* the type of task */ + struct uisscsi_dest vdest; + + /* the vdisk for which this task mgmt is generated */ + void *scsicmd; + + /* This is some handle that the guest has saved off for its own use. + * Its value is preserved by iopart & returned as is in the task mgmt + * rsp. 
*/ + void *notify; + + /* For linux guests, this is a pointer to wait_queue_head that a + * thread is waiting on to see if the taskmgmt command has completed. + * For windows guests, this is a pointer to a location that a waiting + * thread is testing to see if the taskmgmt command has completed. + * When the rsp is received by guest, the thread receiving the + * response uses this to notify the thread waiting for taskmgmt + * command completion. Its value is preserved by iopart & returned + * as is in the task mgmt rsp. */ + void *notifyresult; + + /* this is a handle to location in guest where the result of the + * taskmgmt command (result field) is to saved off when the response + * is handled. Its value is preserved by iopart & returned as is in + * the task mgmt rsp. */ + char result; + + /* result of taskmgmt command - set by IOPart - values are: */ +#define VDISK_MGMT_FAILED 0 +#define VDISK_MGMT_SUCCESS 1 +}; + +/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */ +struct uiscmdrsp { + char cmdtype; + + /* describes what type of information is in the struct */ +#define CMD_SCSI_TYPE 1 +#define CMD_NET_TYPE 2 +#define CMD_SCSITASKMGMT_TYPE 3 +#define CMD_NOTIFYGUEST_TYPE 4 +#define CMD_VDISKMGMT_TYPE 5 + union { + struct uiscmdrsp_scsi scsi; + struct uiscmdrsp_net net; + struct uiscmdrsp_scsitaskmgmt scsitaskmgmt; + struct uiscmdrsp_disknotify disknotify; + struct uiscmdrsp_vdiskmgmt vdiskmgmt; + }; + void *private_data; /* used to send the response when the cmd is + * done (scsi & scsittaskmgmt). */ + struct uiscmdrsp *next; /* General Purpose Queue Link */ + struct uiscmdrsp *activeQ_next; /* Used to track active commands */ + struct uiscmdrsp *activeQ_prev; /* Used to track active commands */ +}; + +/* This is just the header of the IO channel. It is assumed that directly after +* this header there is a large region of memory which contains the command and +* response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS. 
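+*
+* Illustrative only (channel mapping and validation omitted; the pointer name
+* "mapped_channel" is an assumption): a guest vNIC client that has mapped its
+* channel could read its configuration like this:
+*
+*   struct spar_io_channel_protocol *io = mapped_channel;
+*   u8 mac[MAX_MACADDR_LEN];
+*   memcpy(mac, io->vnic.macaddr, MAX_MACADDR_LEN);
+*   u32 mtu = io->vnic.mtu;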
*/ +struct spar_io_channel_protocol { + struct channel_header channel_header; + struct signal_queue_header cmd_q; + struct signal_queue_header rsp_q; + union { + struct { + struct vhba_wwnn wwnn; /* 8 bytes */ + struct vhba_config_max max; /* 20 bytes */ + } vhba; /* 28 */ + struct { + u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */ + u32 num_rcv_bufs; /* 4 */ + u32 mtu; /* 4 */ + uuid_le zone_uuid; /* 16 */ + } vnic; /* total 30 */ + }; + +#define MAX_CLIENTSTRING_LEN 1024 + u8 client_string[MAX_CLIENTSTRING_LEN];/* NULL terminated - so holds + * max - 1 bytes */ +}; + +#pragma pack(pop) +/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */ + +/* define offsets to members of struct uiscmdrsp */ +#define OFFSET_CMDTYPE offsetof(struct uiscmdrsp, cmdtype) +#define OFFSET_SCSI offsetof(struct uiscmdrsp, scsi) +#define OFFSET_NET offsetof(struct uiscmdrsp, net) +#define OFFSET_SCSITASKMGMT offsetof(struct uiscmdrsp, scsitaskmgmt) +#define OFFSET_NEXT offsetof(struct uiscmdrsp, next) + +/* define offsets to members of struct uiscmdrsp_net */ +#define OFFSET_TYPE offsetof(struct uiscmdrsp_net, type) +#define OFFSET_BUF offsetof(struct uiscmdrsp_net, buf) +#define OFFSET_XMT offsetof(struct uiscmdrsp_net, xmt) +#define OFFSET_XMT_DONE_RESULT offsetof(struct uiscmdrsp_net, xmtdone) +#define OFFSET_RCVPOST offsetof(struct uiscmdrsp_net, rcvpost) +#define OFFSET_RCV_DONE_LEN offsetof(struct uiscmdrsp_net, rcv) +#define OFFSET_ENBDIS offsetof(struct uiscmdrsp_net, enbdis) + +/* define offsets to members of struct net_pkt_rcvpost */ +#define OFFSET_TOTALLEN offsetof(struct net_pkt_rcvpost, totallen) +#define OFFSET_FRAG offsetof(struct net_pkt_rcvpost, frag) + +/* +* INLINE functions for initializing and accessing I/O data channels +*/ + +#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64)) +#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64)) + +#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \ + 2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096) + +/* +* INLINE function for expanding a guest's pfn-off-size into multiple 4K page +* pfn-off-size entires. +*/ + +/* we deal with 4K page sizes when we it comes to passing page information + * between */ +/* Guest and IOPartition. */ +#define PI_PAGE_SIZE 0x1000 +#define PI_PAGE_MASK 0x0FFF +#define PI_PAGE_SHIFT 12 + +/* returns next non-zero index on success or zero on failure (i.e. 
out of + * room) + */ +static inline u16 +add_physinfo_entries(u32 inp_pfn, /* input - specifies the pfn to be used + * to add entries */ + u16 inp_off, /* input - specifies the off to be used + * to add entries */ + u32 inp_len, /* input - specifies the len to be used + * to add entries */ + u16 index, /* input - index in array at which new + * entries are added */ + u16 max_pi_arr_entries, /* input - specifies the maximum + * entries pi_arr can hold */ + struct phys_info pi_arr[]) /* input & output - array to + * which entries are added */ +{ + u32 len; + u16 i, firstlen; + + firstlen = PI_PAGE_SIZE - inp_off; + if (inp_len <= firstlen) { + /* the input entry spans only one page - add as is */ + if (index >= max_pi_arr_entries) + return 0; + pi_arr[index].pi_pfn = inp_pfn; + pi_arr[index].pi_off = (u16)inp_off; + pi_arr[index].pi_len = (u16)inp_len; + return index + 1; + } + + /* this entry spans multiple pages */ + for (len = inp_len, i = 0; len; + len -= pi_arr[index + i].pi_len, i++) { + if (index + i >= max_pi_arr_entries) + return 0; + pi_arr[index + i].pi_pfn = inp_pfn + i; + if (i == 0) { + pi_arr[index].pi_off = inp_off; + pi_arr[index].pi_len = firstlen; + } + + else { + pi_arr[index + i].pi_off = 0; + pi_arr[index + i].pi_len = + (u16)MINNUM(len, (u32)PI_PAGE_SIZE); + } + } + return index + i; +} + +#endif /* __IOCHANNEL_H__ */ diff --git a/kernel/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h b/kernel/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h new file mode 100644 index 000000000..2c42ce16e --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h @@ -0,0 +1,94 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __VBUSCHANNEL_H__ +#define __VBUSCHANNEL_H__ + +/* The vbus channel is the channel area provided via the BUS_CREATE controlvm + * message for each virtual bus. This channel area is provided to both server + * and client ends of the bus. The channel header area is initialized by + * the server, and the remaining information is filled in by the client. + * We currently use this for the client to provide various information about + * the client devices and client drivers for the server end to see. + */ +#include +#include "vbusdeviceinfo.h" +#include "channel.h" + +/* {193b331b-c58f-11da-95a9-00e08161165f} */ +#define SPAR_VBUS_CHANNEL_PROTOCOL_UUID \ + UUID_LE(0x193b331b, 0xc58f, 0x11da, \ + 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) +static const uuid_le spar_vbus_channel_protocol_uuid = + SPAR_VBUS_CHANNEL_PROTOCOL_UUID; + +#define SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE + +/* Must increment this whenever you insert or delete fields within this channel +* struct. Also increment whenever you change the meaning of fields within this +* channel struct so as to break pre-existing software. 
Note that you can +* usually add fields to the END of the channel struct withOUT needing to +* increment this. */ +#define SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID 1 + +#define SPAR_VBUS_CHANNEL_OK_CLIENT(ch) \ + spar_check_channel_client(ch, \ + spar_vbus_channel_protocol_uuid, \ + "vbus", \ + sizeof(struct spar_vbus_channel_protocol),\ + SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \ + SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE) + +#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \ + (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \ + "vbus", \ + sizeof(struct ultra_vbus_channel_protocol),\ + actual_bytes)) + +#pragma pack(push, 1) /* both GCC and VC now allow this pragma */ +struct spar_vbus_headerinfo { + u32 struct_bytes; /* size of this struct in bytes */ + u32 device_info_struct_bytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */ + u32 dev_info_count; /* num of items in DevInfo member */ + /* (this is the allocated size) */ + u32 chp_info_offset; /* byte offset from beginning of this struct */ + /* to the ChpInfo struct (below) */ + u32 bus_info_offset; /* byte offset from beginning of this struct */ + /* to the BusInfo struct (below) */ + u32 dev_info_offset; /* byte offset from beginning of this struct */ + /* to the DevInfo array (below) */ + u8 reserved[104]; +}; + +struct spar_vbus_channel_protocol { + struct channel_header channel_header; /* initialized by server */ + struct spar_vbus_headerinfo hdr_info; /* initialized by server */ + /* the remainder of this channel is filled in by the client */ + struct ultra_vbus_deviceinfo chp_info; + /* describes client chipset device and driver */ + struct ultra_vbus_deviceinfo bus_info; + /* describes client bus device and driver */ + struct ultra_vbus_deviceinfo dev_info[0]; + /* describes client device and driver for each device on the bus */ +}; + +#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \ + (sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \ + sizeof(ULTRA_VBUS_DEVICEINFO))) +#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096) + +#pragma pack(pop) + +#endif diff --git a/kernel/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h b/kernel/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h new file mode 100644 index 000000000..f74f5d8c2 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/controlvmcompletionstatus.h @@ -0,0 +1,94 @@ +/* controlvmcompletionstatus.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* Defines for all valid values returned in the response message header + * completionStatus field. See controlvmchannel.h for description of + * the header: _CONTROLVM_MESSAGE_HEADER. 
+ */ + +#ifndef __CONTROLVMCOMPLETIONSTATUS_H__ +#define __CONTROLVMCOMPLETIONSTATUS_H__ + +/* General Errors------------------------------------------------------[0-99] */ +#define CONTROLVM_RESP_SUCCESS 0 +#define CONTROLVM_RESP_ERROR_ALREADY_DONE 1 +#define CONTROLVM_RESP_ERROR_IOREMAP_FAILED 2 +#define CONTROLVM_RESP_ERROR_KMALLOC_FAILED 3 +#define CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN 4 +#define CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT 5 + +/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */ +#define CONTROLVM_RESP_ERROR_CLIENT_SWITCHCOUNT_NONZERO 100 +#define CONTROLVM_RESP_ERROR_EXPECTED_CHIPSET_INIT 101 + +/* Maximum Limit----------------------------------------------------[200-299] */ +#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */ +#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */ +/* Payload and Parameter Related------------------------------------[400-499] */ +#define CONTROLVM_RESP_ERROR_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT, + * DEVICE_CONFIGURE */ +#define CONTROLVM_RESP_ERROR_INITIATOR_PARAMETER_INVALID 401 /* Multiple */ +#define CONTROLVM_RESP_ERROR_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */ +#define CONTROLVM_RESP_ERROR_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */ +/* Specified[Packet Structure] Value-------------------------------[500-599] */ +#define CONTROLVM_RESP_ERROR_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT, + * BUS_CONFIGURE, + * DEVICE_CREATE, + * DEVICE_CONFIG + * DEVICE_DESTROY */ +#define CONTROLVM_RESP_ERROR_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT */ + /* DEVICE_CREATE, + * DEVICE_CONFIGURE, + * DEVICE_DESTROY */ +#define CONTROLVM_RESP_ERROR_CHANNEL_INVALID 502 /* DEVICE_CREATE, + * DEVICE_CONFIGURE */ +/* Partition Driver Callback Interface----------------------[600-699] */ +#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE, + * BUS_DESTROY, + * DEVICE_CREATE, + * DEVICE_DESTROY */ +/* Unable to invoke VIRTPCI callback */ +#define CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR 605 + /* BUS_CREATE, + * BUS_DESTROY, + * DEVICE_CREATE, + * DEVICE_DESTROY */ +/* VIRTPCI Callback returned error */ +#define CONTROLVM_RESP_ERROR_GENERIC_DRIVER_CALLBACK_ERROR 606 + /* SWITCH_ATTACHEXTPORT, + * SWITCH_DETACHEXTPORT + * DEVICE_CONFIGURE */ + +/* generic device callback returned error */ +/* Bus Related------------------------------------------------------[700-799] */ +#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */ +/* Channel Related--------------------------------------------------[800-899] */ +#define CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO, + * DEVICE_DESTROY */ +#define CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */ +/* Chipset Shutdown Related---------------------------------------[1000-1099] */ +#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_FAILED 1000 +#define CONTROLVM_RESP_ERROR_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001 + +/* Chipset Stop Related-------------------------------------------[1100-1199] */ +#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_BUS 1100 +#define CONTROLVM_RESP_ERROR_CHIPSET_STOP_FAILED_SWITCH 1101 + +/* Device Related-------------------------------------------------[1400-1499] */ +#define CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT 1400 + +#endif /* __CONTROLVMCOMPLETIONSTATUS_H__ not defined */ diff --git a/kernel/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h 
b/kernel/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h new file mode 100644 index 000000000..18cc9ed27 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/diagnostics/appos_subsystems.h @@ -0,0 +1,310 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* Please note that this file is to be used ONLY for defining diagnostic + * subsystem values for the appos (sPAR Linux service partitions) component. + */ +#ifndef __APPOS_SUBSYSTEMS_H__ +#define __APPOS_SUBSYSTEMS_H__ + +#ifdef __KERNEL__ +#include +#include +#else +#include +#include +#endif + +static inline char * +subsys_unknown_to_s(int subsys, char *s, int n) +{ + snprintf(s, n, "SUBSYS-%-2.2d", subsys); + s[n - 1] = '\0'; + return s; +} + +#define SUBSYS_TO_MASK(subsys) (1ULL << (subsys)) + +/* The first SUBSYS_APPOS_MAX subsystems are the same for each AppOS type + * (IOVM, SMS, etc.) The rest have unique values for each AppOS type. + */ +#define SUBSYS_APPOS_MAX 16 + +#define SUBSYS_APPOS_DEFAULT 1 /* or "other" */ +#define SUBSYS_APPOS_CHIPSET 2 /* controlvm and other */ + /* low-level sPAR activity */ +#define SUBSYS_APPOS_BUS 3 /* sPAR bus */ +/* DAK #define SUBSYS_APPOS_DIAG 4 // diagnostics and dump */ +#define SUBSYS_APPOS_CHANNELACCESS 5 /* generic channel access */ +#define SUBSYS_APPOS_NICCLIENT 6 /* virtual NIC client */ +#define SUBSYS_APPOS_HBACLIENT 7 /* virtual HBA client */ +#define SUBSYS_APPOS_CONSOLESERIAL 8 /* sPAR virtual serial console */ +#define SUBSYS_APPOS_UISLIB 9 /* */ +#define SUBSYS_APPOS_VRTCUPDD 10 /* */ +#define SUBSYS_APPOS_WATCHDOG 11 /* watchdog timer and healthcheck */ +#define SUBSYS_APPOS_13 13 /* available */ +#define SUBSYS_APPOS_14 14 /* available */ +#define SUBSYS_APPOS_15 15 /* available */ +#define SUBSYS_APPOS_16 16 /* available */ +static inline char * +subsys_generic_to_s(int subsys, char *s, int n) +{ + switch (subsys) { + case SUBSYS_APPOS_DEFAULT: + strncpy(s, "APPOS_DEFAULT", n); + break; + case SUBSYS_APPOS_CHIPSET: + strncpy(s, "APPOS_CHIPSET", n); + break; + case SUBSYS_APPOS_BUS: + strncpy(s, "APPOS_BUS", n); + break; + case SUBSYS_APPOS_CHANNELACCESS: + strncpy(s, "APPOS_CHANNELACCESS", n); + break; + case SUBSYS_APPOS_NICCLIENT: + strncpy(s, "APPOS_NICCLIENT", n); + break; + case SUBSYS_APPOS_HBACLIENT: + strncpy(s, "APPOS_HBACLIENT", n); + break; + case SUBSYS_APPOS_CONSOLESERIAL: + strncpy(s, "APPOS_CONSOLESERIAL", n); + break; + case SUBSYS_APPOS_UISLIB: + strncpy(s, "APPOS_UISLIB", n); + break; + case SUBSYS_APPOS_VRTCUPDD: + strncpy(s, "APPOS_VRTCUPDD", n); + break; + case SUBSYS_APPOS_WATCHDOG: + strncpy(s, "APPOS_WATCHDOG", n); + break; + case SUBSYS_APPOS_13: + strncpy(s, "APPOS_13", n); + break; + case SUBSYS_APPOS_14: + strncpy(s, "APPOS_14", n); + break; + case SUBSYS_APPOS_15: + strncpy(s, "APPOS_15", n); + break; + case SUBSYS_APPOS_16: + strncpy(s, "APPOS_16", n); + break; + default: + subsys_unknown_to_s(subsys, s, n); + break; + } + s[n - 1] = 
'\0'; + return s; +} + +/* CONSOLE */ + +#define SUBSYS_CONSOLE_VIDEO (SUBSYS_APPOS_MAX + 1) /* 17 */ +#define SUBSYS_CONSOLE_KBDMOU (SUBSYS_APPOS_MAX + 2) /* 18 */ +#define SUBSYS_CONSOLE_04 (SUBSYS_APPOS_MAX + 4) +#define SUBSYS_CONSOLE_05 (SUBSYS_APPOS_MAX + 5) +#define SUBSYS_CONSOLE_06 (SUBSYS_APPOS_MAX + 6) +#define SUBSYS_CONSOLE_07 (SUBSYS_APPOS_MAX + 7) +#define SUBSYS_CONSOLE_08 (SUBSYS_APPOS_MAX + 8) +#define SUBSYS_CONSOLE_09 (SUBSYS_APPOS_MAX + 9) +#define SUBSYS_CONSOLE_10 (SUBSYS_APPOS_MAX + 10) +#define SUBSYS_CONSOLE_11 (SUBSYS_APPOS_MAX + 11) +#define SUBSYS_CONSOLE_12 (SUBSYS_APPOS_MAX + 12) +#define SUBSYS_CONSOLE_13 (SUBSYS_APPOS_MAX + 13) +#define SUBSYS_CONSOLE_14 (SUBSYS_APPOS_MAX + 14) +#define SUBSYS_CONSOLE_15 (SUBSYS_APPOS_MAX + 15) +#define SUBSYS_CONSOLE_16 (SUBSYS_APPOS_MAX + 16) +#define SUBSYS_CONSOLE_17 (SUBSYS_APPOS_MAX + 17) +#define SUBSYS_CONSOLE_18 (SUBSYS_APPOS_MAX + 18) +#define SUBSYS_CONSOLE_19 (SUBSYS_APPOS_MAX + 19) +#define SUBSYS_CONSOLE_20 (SUBSYS_APPOS_MAX + 20) +#define SUBSYS_CONSOLE_21 (SUBSYS_APPOS_MAX + 21) +#define SUBSYS_CONSOLE_22 (SUBSYS_APPOS_MAX + 22) +#define SUBSYS_CONSOLE_23 (SUBSYS_APPOS_MAX + 23) +#define SUBSYS_CONSOLE_24 (SUBSYS_APPOS_MAX + 24) +#define SUBSYS_CONSOLE_25 (SUBSYS_APPOS_MAX + 25) +#define SUBSYS_CONSOLE_26 (SUBSYS_APPOS_MAX + 26) +#define SUBSYS_CONSOLE_27 (SUBSYS_APPOS_MAX + 27) +#define SUBSYS_CONSOLE_28 (SUBSYS_APPOS_MAX + 28) +#define SUBSYS_CONSOLE_29 (SUBSYS_APPOS_MAX + 29) +#define SUBSYS_CONSOLE_30 (SUBSYS_APPOS_MAX + 30) +#define SUBSYS_CONSOLE_31 (SUBSYS_APPOS_MAX + 31) +#define SUBSYS_CONSOLE_32 (SUBSYS_APPOS_MAX + 32) +#define SUBSYS_CONSOLE_33 (SUBSYS_APPOS_MAX + 33) +#define SUBSYS_CONSOLE_34 (SUBSYS_APPOS_MAX + 34) +#define SUBSYS_CONSOLE_35 (SUBSYS_APPOS_MAX + 35) +#define SUBSYS_CONSOLE_36 (SUBSYS_APPOS_MAX + 36) +#define SUBSYS_CONSOLE_37 (SUBSYS_APPOS_MAX + 37) +#define SUBSYS_CONSOLE_38 (SUBSYS_APPOS_MAX + 38) +#define SUBSYS_CONSOLE_39 (SUBSYS_APPOS_MAX + 39) +#define SUBSYS_CONSOLE_40 (SUBSYS_APPOS_MAX + 40) +#define SUBSYS_CONSOLE_41 (SUBSYS_APPOS_MAX + 41) +#define SUBSYS_CONSOLE_42 (SUBSYS_APPOS_MAX + 42) +#define SUBSYS_CONSOLE_43 (SUBSYS_APPOS_MAX + 43) +#define SUBSYS_CONSOLE_44 (SUBSYS_APPOS_MAX + 44) +#define SUBSYS_CONSOLE_45 (SUBSYS_APPOS_MAX + 45) +#define SUBSYS_CONSOLE_46 (SUBSYS_APPOS_MAX + 46) + +static inline char * +subsys_console_to_s(int subsys, char *s, int n) +{ + switch (subsys) { + case SUBSYS_CONSOLE_VIDEO: + strncpy(s, "CONSOLE_VIDEO", n); + break; + case SUBSYS_CONSOLE_KBDMOU: + strncpy(s, "CONSOLE_KBDMOU", n); + break; + case SUBSYS_CONSOLE_04: + strncpy(s, "CONSOLE_04", n); + break; + case SUBSYS_CONSOLE_05: + strncpy(s, "CONSOLE_05", n); + break; + case SUBSYS_CONSOLE_06: + strncpy(s, "CONSOLE_06", n); + break; + case SUBSYS_CONSOLE_07: + strncpy(s, "CONSOLE_07", n); + break; + case SUBSYS_CONSOLE_08: + strncpy(s, "CONSOLE_08", n); + break; + case SUBSYS_CONSOLE_09: + strncpy(s, "CONSOLE_09", n); + break; + case SUBSYS_CONSOLE_10: + strncpy(s, "CONSOLE_10", n); + break; + case SUBSYS_CONSOLE_11: + strncpy(s, "CONSOLE_11", n); + break; + case SUBSYS_CONSOLE_12: + strncpy(s, "CONSOLE_12", n); + break; + case SUBSYS_CONSOLE_13: + strncpy(s, "CONSOLE_13", n); + break; + case SUBSYS_CONSOLE_14: + strncpy(s, "CONSOLE_14", n); + break; + case SUBSYS_CONSOLE_15: + strncpy(s, "CONSOLE_15", n); + break; + case SUBSYS_CONSOLE_16: + strncpy(s, "CONSOLE_16", n); + break; + case SUBSYS_CONSOLE_17: + strncpy(s, "CONSOLE_17", n); + break; + case SUBSYS_CONSOLE_18: + 
strncpy(s, "CONSOLE_18", n); + break; + case SUBSYS_CONSOLE_19: + strncpy(s, "CONSOLE_19", n); + break; + case SUBSYS_CONSOLE_20: + strncpy(s, "CONSOLE_20", n); + break; + case SUBSYS_CONSOLE_21: + strncpy(s, "CONSOLE_21", n); + break; + case SUBSYS_CONSOLE_22: + strncpy(s, "CONSOLE_22", n); + break; + case SUBSYS_CONSOLE_23: + strncpy(s, "CONSOLE_23", n); + break; + case SUBSYS_CONSOLE_24: + strncpy(s, "CONSOLE_24", n); + break; + case SUBSYS_CONSOLE_25: + strncpy(s, "CONSOLE_25", n); + break; + case SUBSYS_CONSOLE_26: + strncpy(s, "CONSOLE_26", n); + break; + case SUBSYS_CONSOLE_27: + strncpy(s, "CONSOLE_27", n); + break; + case SUBSYS_CONSOLE_28: + strncpy(s, "CONSOLE_28", n); + break; + case SUBSYS_CONSOLE_29: + strncpy(s, "CONSOLE_29", n); + break; + case SUBSYS_CONSOLE_30: + strncpy(s, "CONSOLE_30", n); + break; + case SUBSYS_CONSOLE_31: + strncpy(s, "CONSOLE_31", n); + break; + case SUBSYS_CONSOLE_32: + strncpy(s, "CONSOLE_32", n); + break; + case SUBSYS_CONSOLE_33: + strncpy(s, "CONSOLE_33", n); + break; + case SUBSYS_CONSOLE_34: + strncpy(s, "CONSOLE_34", n); + break; + case SUBSYS_CONSOLE_35: + strncpy(s, "CONSOLE_35", n); + break; + case SUBSYS_CONSOLE_36: + strncpy(s, "CONSOLE_36", n); + break; + case SUBSYS_CONSOLE_37: + strncpy(s, "CONSOLE_37", n); + break; + case SUBSYS_CONSOLE_38: + strncpy(s, "CONSOLE_38", n); + break; + case SUBSYS_CONSOLE_39: + strncpy(s, "CONSOLE_39", n); + break; + case SUBSYS_CONSOLE_40: + strncpy(s, "CONSOLE_40", n); + break; + case SUBSYS_CONSOLE_41: + strncpy(s, "CONSOLE_41", n); + break; + case SUBSYS_CONSOLE_42: + strncpy(s, "CONSOLE_42", n); + break; + case SUBSYS_CONSOLE_43: + strncpy(s, "CONSOLE_43", n); + break; + case SUBSYS_CONSOLE_44: + strncpy(s, "CONSOLE_44", n); + break; + case SUBSYS_CONSOLE_45: + strncpy(s, "CONSOLE_45", n); + break; + case SUBSYS_CONSOLE_46: + strncpy(s, "CONSOLE_46", n); + break; + default: + subsys_unknown_to_s(subsys, s, n); + break; + } + s[n - 1] = '\0'; + return s; +} + +#endif diff --git a/kernel/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h b/kernel/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h new file mode 100644 index 000000000..57dd93e0c --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/iovmcall_gnuc.h @@ -0,0 +1,49 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ */ + +/* Linux GCC Version (32-bit and 64-bit) */ +static inline unsigned long +__unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx, + unsigned long reg_ecx) +{ + unsigned long result = 0; + unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx; + + cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx); + if (!(cpuid_ecx & 0x80000000)) + return -1; + + __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) : + "a"(tuple), "b"(reg_ebx), "c"(reg_ecx)); + return result; +} + +static inline unsigned long +__unisys_extended_vmcall_gnuc(unsigned long long tuple, + unsigned long long reg_ebx, + unsigned long long reg_ecx, + unsigned long long reg_edx) +{ + unsigned long result = 0; + unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx; + + cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx); + if (!(cpuid_ecx & 0x80000000)) + return -1; + + __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) : + "a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx)); + return result; +} diff --git a/kernel/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h b/kernel/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h new file mode 100644 index 000000000..9b6d3e693 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h @@ -0,0 +1,213 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __VBUSDEVICEINFO_H__ +#define __VBUSDEVICEINFO_H__ + +#include + +#pragma pack(push, 1) /* both GCC and VC now allow this pragma */ + +/* An array of this struct is present in the channel area for each vbus. + * (See vbuschannel.h.) + * It is filled in by the client side to provide info about the device + * and driver from the client's perspective. + */ +struct ultra_vbus_deviceinfo { + u8 devtype[16]; /* short string identifying the device type */ + u8 drvname[16]; /* driver .sys file name */ + u8 infostrs[96]; /* sequence of tab-delimited id strings: */ + /* */ + u8 reserved[128]; /* pad size to 256 bytes */ +}; + +#pragma pack(pop) + +/* Reads chars from the buffer at for bytes, and writes to + * the buffer at
<p>, which is <remain> bytes long, ensuring never to + * overflow the buffer at
<p>, using the following rules: + * - printable characters are simply copied from the buffer at <src> to the + * buffer at
<p> + * - intervening streaks of non-printable characters in the buffer at <src> + * are replaced with a single space in the buffer at
<p> + * Note that we pay no attention to '\0'-termination. + * Returns the number of bytes written to
<p>. + * + * Pass
<p> == NULL and <remain> == 0 for this special behavior. In this + * case, we simply return the number of bytes that WOULD HAVE been written + * to a buffer at
<p>
, had it been infinitely big. + */ +static inline int +vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax) +{ + int chars = 0; + int nonprintable_streak = 0; + + while (srcmax > 0) { + if ((*src >= ' ') && (*src < 0x7f)) { + if (nonprintable_streak) { + if (remain > 0) { + *p = ' '; + p++; + remain--; + chars++; + } else if (p == NULL) { + chars++; + } + nonprintable_streak = 0; + } + if (remain > 0) { + *p = *src; + p++; + remain--; + chars++; + } else if (p == NULL) { + chars++; + } + } else { + nonprintable_streak = 1; + } + src++; + srcmax--; + } + return chars; +} + +#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \ + do { \ + if (remain <= 0) \ + break; \ + *p = ch; \ + p++; chars++; remain--; \ + } while (0) + +/* Converts the non-negative value at to an ascii decimal string + * at
<p>, writing at most <remain> bytes. Note there is NO '\0' termination + * written to
<p>. + * + * Returns the number of bytes written to
<p>
. + * + * Note that we create this function because we need to do this operation in + * an environment-independent way (since we are in a common header file). + */ +static inline int +vbuschannel_itoa(char *p, int remain, int num) +{ + int digits = 0; + char s[32]; + int i; + + if (num == 0) { + /* '0' is a special case */ + if (remain <= 0) + return 0; + *p = '0'; + return 1; + } + /* form a backwards decimal ascii string in */ + while (num > 0) { + if (digits >= (int)sizeof(s)) + return 0; + s[digits++] = (num % 10) + '0'; + num = num / 10; + } + if (remain < digits) { + /* not enough room left at
<p> to hold number, so fill with + * '?' */ + for (i = 0; i < remain; i++, p++) + *p = '?'; + return remain; + } + /* plug in the decimal ascii string representing the number, by */ + /* reversing the string we just built in <s> */ + i = digits; + while (i > 0) { + i--; + *p = s[i]; + p++; + } + return digits; +} + +/* Reads <devinfo>, and converts its contents to a printable string at
<p>, + * writing at most <remain> bytes. Note there is NO '\0' termination + * written to
<p>. + * + * Pass <devix> >= 0 if you want a device index presented. + * + * Returns the number of bytes written to
<p>
. + */ +static inline int +vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo, + char *p, int remain, int devix) +{ + char *psrc; + int nsrc, x, i, pad; + int chars = 0; + + psrc = &devinfo->devtype[0]; + nsrc = sizeof(devinfo->devtype); + if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0) + return 0; + + /* emit device index */ + if (devix >= 0) { + VBUSCHANNEL_ADDACHAR('[', p, remain, chars); + x = vbuschannel_itoa(p, remain, devix); + p += x; + remain -= x; + chars += x; + VBUSCHANNEL_ADDACHAR(']', p, remain, chars); + } else { + VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); + VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); + VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); + } + + /* emit device type */ + x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc); + p += x; + remain -= x; + chars += x; + pad = 15 - x; /* pad device type to be exactly 15 chars */ + for (i = 0; i < pad; i++) + VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); + VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); + + /* emit driver name */ + psrc = &devinfo->drvname[0]; + nsrc = sizeof(devinfo->drvname); + x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc); + p += x; + remain -= x; + chars += x; + pad = 15 - x; /* pad driver name to be exactly 15 chars */ + for (i = 0; i < pad; i++) + VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); + VBUSCHANNEL_ADDACHAR(' ', p, remain, chars); + + /* emit strings */ + psrc = &devinfo->infostrs[0]; + nsrc = sizeof(devinfo->infostrs); + x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc); + p += x; + remain -= x; + chars += x; + VBUSCHANNEL_ADDACHAR('\n', p, remain, chars); + + return chars; +} + +#endif diff --git a/kernel/drivers/staging/unisys/common-spar/include/version.h b/kernel/drivers/staging/unisys/common-spar/include/version.h new file mode 100644 index 000000000..83d1da7a2 --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/version.h @@ -0,0 +1,45 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* version.h */ + +/* Common version/release info needed by all components goes here. + * (This file must compile cleanly in all environments.) + * Ultimately, this will be combined with defines generated dynamically as + * part of the sysgen, and some of the defines below may in fact end up + * being replaced with dynamically generated ones. + */ +#ifndef __VERSION_H__ +#define __VERSION_H__ + +#define SPARVER1 "1" +#define SPARVER2 "0" +#define SPARVER3 "0" +#define SPARVER4 "0" + +#define VERSION SPARVER1 "." SPARVER2 "." SPARVER3 "." SPARVER4 + +/* Here are various version forms needed in Windows environments. + */ +#define VISOR_PRODUCTVERSION SPARVERCOMMA +#define VISOR_PRODUCTVERSION_STR SPARVER1 "." SPARVER2 "." SPARVER3 "." 
\ + SPARVER4 +#define VISOR_OBJECTVERSION_STR SPARVER1 "," SPARVER2 "," SPARVER3 "," \ + SPARVER4 + +#define COPYRIGHT "Unisys Corporation" +#define COPYRIGHTDATE "2010 - 2013" + +#endif diff --git a/kernel/drivers/staging/unisys/common-spar/include/vmcallinterface.h b/kernel/drivers/staging/unisys/common-spar/include/vmcallinterface.h new file mode 100644 index 000000000..59a7459eb --- /dev/null +++ b/kernel/drivers/staging/unisys/common-spar/include/vmcallinterface.h @@ -0,0 +1,163 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __IOMONINTF_H__ +#define __IOMONINTF_H__ + +/* +* This file contains all structures needed to support the VMCALLs for IO +* Virtualization. The VMCALLs are provided by Monitor and used by IO code +* running on IO Partitions. +*/ + +#ifdef __GNUC__ +#include "iovmcall_gnuc.h" +#endif /* */ +#include "diagchannel.h" + +#ifdef VMCALL_IO_CONTROLVM_ADDR +#undef VMCALL_IO_CONTROLVM_ADDR +#endif /* */ + +/* define subsystem number for AppOS, used in uislib driver */ +#define MDS_APPOS 0x4000000000000000L /* subsystem = 62 - AppOS */ +enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */ + /* Note: when a new VMCALL is added: + * - the 1st 2 hex digits correspond to one of the + * VMCALL_MONITOR_INTERFACE types and + * - the next 2 hex digits are the nth relative instance of within a + * type + * E.G. 
for VMCALL_VIRTPART_RECYCLE_PART, + * - the 0x02 identifies it as a VMCALL_VIRTPART type and + * - the 0x01 identifies it as the 1st instance of a VMCALL_VIRTPART + * type of VMCALL + */ + + VMCALL_IO_CONTROLVM_ADDR = 0x0501, /* used by all Guests, not just + * IO */ + VMCALL_IO_DIAG_ADDR = 0x0508, + VMCALL_IO_VISORSERIAL_ADDR = 0x0509, + VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET = 0x0708, /* Allow caller to + * query virtual time + * offset */ + VMCALL_CHANNEL_VERSION_MISMATCH = 0x0709, + VMCALL_POST_CODE_LOGEVENT = 0x070B, /* LOGEVENT Post Code (RDX) with + * specified subsystem mask (RCX + * - monitor_subsystems.h) and + * severity (RDX) */ + VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER = 0x0802, /* Yield the + * remainder & all + * future quantums of + * the caller */ + VMCALL_MEASUREMENT_DO_NOTHING = 0x0901, + VMCALL_UPDATE_PHYSICAL_TIME = 0x0a02 /* Allow + * ULTRA_SERVICE_CAPABILITY_TIME + * capable guest to make + * VMCALL */ +}; + +#define VMCALL_SUCCESS 0 +#define VMCALL_SUCCESSFUL(result) (result == 0) + +#ifdef __GNUC__ +#define unisys_vmcall(tuple, reg_ebx, reg_ecx) \ + __unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx) +#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \ + __unisys_extended_vmcall_gnuc(tuple, reg_ebx, reg_ecx, reg_edx) +#define ISSUE_IO_VMCALL(method, param, result) \ + (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \ + (param) >> 32)) +#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, param3) \ + unisys_extended_vmcall(method, param1, param2, param3) + + /* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently + * not used much */ +#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity) \ +do { \ + ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity, \ + MDS_APPOS, postcode); \ +} while (0) +#endif + +/* Structures for IO VMCALLs */ + +/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */ +/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */ +#pragma pack(push, 1) +struct phys_info { + u64 pi_pfn; + u16 pi_off; + u16 pi_len; +}; + +#pragma pack(pop) +/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */ + +/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */ +/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */ +#pragma pack(push, 1) +/* Parameters to VMCALL_IO_CONTROLVM_ADDR interface */ +struct vmcall_io_controlvm_addr_params { + /* The Guest-relative physical address of the ControlVm channel. + * This VMCall fills this in with the appropriate address. */ + u64 address; /* contents provided by this VMCALL (OUT) */ + /* the size of the ControlVm channel in bytes This VMCall fills this + * in with the appropriate address. */ + u32 channel_bytes; /* contents provided by this VMCALL (OUT) */ + u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */ +}; + +#pragma pack(pop) +/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */ + +/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */ +/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */ +#pragma pack(push, 1) +/* Parameters to VMCALL_IO_DIAG_ADDR interface */ +struct vmcall_io_diag_addr_params { + /* The Guest-relative physical address of the diagnostic channel. + * This VMCall fills this in with the appropriate address. 
*/ + u64 address; /* contents provided by this VMCALL (OUT) */ +}; + +#pragma pack(pop) +/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */ + +/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */ +/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */ +#pragma pack(push, 1) +/* Parameters to VMCALL_IO_VISORSERIAL_ADDR interface */ +struct vmcall_io_visorserial_addr_params { + /* The Guest-relative physical address of the serial console + * channel. This VMCall fills this in with the appropriate + * address. */ + u64 address; /* contents provided by this VMCALL (OUT) */ +}; + +#pragma pack(pop) +/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */ + +/* Parameters to VMCALL_CHANNEL_MISMATCH interface */ +struct vmcall_channel_version_mismatch_params { + u8 chname[32]; /* Null terminated string giving name of channel + * (IN) */ + u8 item_name[32]; /* Null terminated string giving name of + * mismatched item (IN) */ + u32 line_no; /* line# where invoked. (IN) */ + u8 file_name[36]; /* source code where invoked - Null terminated + * string (IN) */ +}; + +#endif /* __IOMONINTF_H__ */ diff --git a/kernel/drivers/staging/unisys/include/guestlinuxdebug.h b/kernel/drivers/staging/unisys/include/guestlinuxdebug.h new file mode 100644 index 000000000..957a627d0 --- /dev/null +++ b/kernel/drivers/staging/unisys/include/guestlinuxdebug.h @@ -0,0 +1,180 @@ +/* Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ */ + +#ifndef __GUESTLINUXDEBUG_H__ +#define __GUESTLINUXDEBUG_H__ + +/* +* This file contains supporting interface for "vmcallinterface.h", particularly +* regarding adding additional structure and functionality to linux +* ISSUE_IO_VMCALL_POSTCODE_SEVERITY */ + +/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/ +#include "vmcallinterface.h" +enum driver_pc { /* POSTCODE driver identifier tuples */ + /* visorchipset driver files */ + VISOR_CHIPSET_PC = 0xA0, + VISOR_CHIPSET_PC_controlvm_c = 0xA1, + VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2, + VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3, + VISOR_CHIPSET_PC_file_c = 0xA4, + VISOR_CHIPSET_PC_parser_c = 0xA5, + VISOR_CHIPSET_PC_testing_c = 0xA6, + VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7, + VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8, + /* visorbus driver files */ + VISOR_BUS_PC = 0xB0, + VISOR_BUS_PC_businst_attr_c = 0xB1, + VISOR_BUS_PC_channel_attr_c = 0xB2, + VISOR_BUS_PC_devmajorminor_attr_c = 0xB3, + VISOR_BUS_PC_visorbus_main_c = 0xB4, + /* visorclientbus driver files */ + VISOR_CLIENT_BUS_PC = 0xC0, + VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1, + /* virt hba driver files */ + VIRT_HBA_PC = 0xC2, + VIRT_HBA_PC_virthba_c = 0xC3, + /* virtpci driver files */ + VIRT_PCI_PC = 0xC4, + VIRT_PCI_PC_virtpci_c = 0xC5, + /* virtnic driver files */ + VIRT_NIC_PC = 0xC6, + VIRT_NIC_P_virtnic_c = 0xC7, + /* uislib driver files */ + UISLIB_PC = 0xD0, + UISLIB_PC_uislib_c = 0xD1, + UISLIB_PC_uisqueue_c = 0xD2, + UISLIB_PC_uisthread_c = 0xD3, + UISLIB_PC_uisutils_c = 0xD4, +}; + +enum event_pc { /* POSTCODE event identifier tuples */ + ATTACH_PORT_ENTRY_PC = 0x001, + ATTACH_PORT_FAILURE_PC = 0x002, + ATTACH_PORT_SUCCESS_PC = 0x003, + BUS_FAILURE_PC = 0x004, + BUS_CREATE_ENTRY_PC = 0x005, + BUS_CREATE_FAILURE_PC = 0x006, + BUS_CREATE_EXIT_PC = 0x007, + BUS_CONFIGURE_ENTRY_PC = 0x008, + BUS_CONFIGURE_FAILURE_PC = 0x009, + BUS_CONFIGURE_EXIT_PC = 0x00A, + CHIPSET_INIT_ENTRY_PC = 0x00B, + CHIPSET_INIT_SUCCESS_PC = 0x00C, + CHIPSET_INIT_FAILURE_PC = 0x00D, + CHIPSET_INIT_EXIT_PC = 0x00E, + CREATE_WORKQUEUE_PC = 0x00F, + CREATE_WORKQUEUE_FAILED_PC = 0x0A0, + CONTROLVM_INIT_FAILURE_PC = 0x0A1, + DEVICE_CREATE_ENTRY_PC = 0x0A2, + DEVICE_CREATE_FAILURE_PC = 0x0A3, + DEVICE_CREATE_SUCCESS_PC = 0x0A4, + DEVICE_CREATE_EXIT_PC = 0x0A5, + DEVICE_ADD_PC = 0x0A6, + DEVICE_REGISTER_FAILURE_PC = 0x0A7, + DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8, + DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9, + DEVICE_CHANGESTATE_EXIT_PC = 0x0AA, + DRIVER_ENTRY_PC = 0x0AB, + DRIVER_EXIT_PC = 0x0AC, + MALLOC_FAILURE_PC = 0x0AD, + QUEUE_DELAYED_WORK_PC = 0x0AE, + UISLIB_THREAD_FAILURE_PC = 0x0B7, + VBUS_CHANNEL_ENTRY_PC = 0x0B8, + VBUS_CHANNEL_FAILURE_PC = 0x0B9, + VBUS_CHANNEL_EXIT_PC = 0x0BA, + VHBA_CREATE_ENTRY_PC = 0x0BB, + VHBA_CREATE_FAILURE_PC = 0x0BC, + VHBA_CREATE_EXIT_PC = 0x0BD, + VHBA_CREATE_SUCCESS_PC = 0x0BE, + VHBA_COMMAND_HANDLER_PC = 0x0BF, + VHBA_PROBE_ENTRY_PC = 0x0C0, + VHBA_PROBE_FAILURE_PC = 0x0C1, + VHBA_PROBE_EXIT_PC = 0x0C2, + VNIC_CREATE_ENTRY_PC = 0x0C3, + VNIC_CREATE_FAILURE_PC = 0x0C4, + VNIC_CREATE_SUCCESS_PC = 0x0C5, + VNIC_PROBE_ENTRY_PC = 0x0C6, + VNIC_PROBE_FAILURE_PC = 0x0C7, + VNIC_PROBE_EXIT_PC = 0x0C8, + VPCI_CREATE_ENTRY_PC = 0x0C9, + VPCI_CREATE_FAILURE_PC = 0x0CA, + VPCI_CREATE_EXIT_PC = 0x0CB, + VPCI_PROBE_ENTRY_PC = 0x0CC, + VPCI_PROBE_FAILURE_PC = 0x0CD, + VPCI_PROBE_EXIT_PC = 0x0CE, + CRASH_DEV_ENTRY_PC = 0x0CF, + CRASH_DEV_EXIT_PC = 0x0D0, + CRASH_DEV_HADDR_NULL = 0x0D1, + CRASH_DEV_CONTROLVM_NULL = 0x0D2, + CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3, + 
CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4, + CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5, + CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6, + CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7, + CRASH_DEV_COUNT_FAILURE_PC = 0x0D8, + SAVE_MSG_BUS_FAILURE_PC = 0x0D9, + SAVE_MSG_DEV_FAILURE_PC = 0x0DA, + CALLHOME_INIT_FAILURE_PC = 0x0DB +}; + +#ifdef __GNUC__ + +#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR +#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING +#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT /* TODO-> Info currently + * doesnt show, so we + * set info=warning */ +/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR); + * Please also note that the resulting postcode is in hex, so if you are + * searching for the __LINE__ number, convert it first to decimal. The line + * number combined with driver and type of call, will allow you to track down + * exactly what line an error occurred on, or where the last driver + * entered/exited from. + */ + +/* BASE FUNCTIONS */ +#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity) \ +do { \ + unsigned long long post_code_temp; \ + post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \ + ((((u64)__LINE__) & 0xFFF) << 32) | \ + (((u64)pc32bit) & 0xFFFFFFFF); \ + ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \ +} while (0) + +#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \ +do { \ + unsigned long long post_code_temp; \ + post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \ + ((((u64)__LINE__) & 0xFFF) << 32) | \ + ((((u64)pc16bit1) & 0xFFFF) << 16) | \ + (((u64)pc16bit2) & 0xFFFF); \ + ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity); \ +} while (0) + +/* MOST COMMON */ +#define POSTCODE_LINUX_2(EVENT_PC, severity) \ + POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity) + +#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity) \ + POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity) + +#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity) \ + POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1, \ + pc16bit2, severity) + +#endif +#endif diff --git a/kernel/drivers/staging/unisys/include/periodic_work.h b/kernel/drivers/staging/unisys/include/periodic_work.h new file mode 100644 index 000000000..26ec10bdf --- /dev/null +++ b/kernel/drivers/staging/unisys/include/periodic_work.h @@ -0,0 +1,38 @@ +/* periodic_work.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __PERIODIC_WORK_H__ +#define __PERIODIC_WORK_H__ + +#include "timskmod.h" + +/* PERIODIC_WORK an opaque structure to users. + * Fields are declared only in the implementation .c files. 
+ */ +struct periodic_work; + +struct periodic_work *visor_periodic_work_create(ulong jiffy_interval, + struct workqueue_struct *workqueue, + void (*workfunc)(void *), + void *workfuncarg, + const char *devnam); +void visor_periodic_work_destroy(struct periodic_work *pw); +BOOL visor_periodic_work_nextperiod(struct periodic_work *pw); +BOOL visor_periodic_work_start(struct periodic_work *pw); +BOOL visor_periodic_work_stop(struct periodic_work *pw); + +#endif diff --git a/kernel/drivers/staging/unisys/include/procobjecttree.h b/kernel/drivers/staging/unisys/include/procobjecttree.h new file mode 100644 index 000000000..809c67942 --- /dev/null +++ b/kernel/drivers/staging/unisys/include/procobjecttree.h @@ -0,0 +1,47 @@ +/* procobjecttree.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/** @file ********************************************************************* + * + * This describes the interfaces necessary for creating a tree of types, + * objects, and properties in /proc. + * + ****************************************************************************** + */ + +#ifndef __PROCOBJECTTREE_H__ +#define __PROCOBJECTTREE_H__ + +#include "timskmod.h" + +/* These are opaque structures to users. + * Fields are declared only in the implementation .c files. + */ +typedef struct MYPROCOBJECT_Tag MYPROCOBJECT; +typedef struct MYPROCTYPE_Tag MYPROCTYPE; + +MYPROCOBJECT *visor_proc_CreateObject(MYPROCTYPE *type, const char *name, + void *context); +void visor_proc_DestroyObject(MYPROCOBJECT *obj); +MYPROCTYPE *visor_proc_CreateType(struct proc_dir_entry *procRootDir, + const char **name, + const char **propertyNames, + void (*show_property)(struct seq_file *, + void *, int)); +void visor_proc_DestroyType(MYPROCTYPE *type); + +#endif diff --git a/kernel/drivers/staging/unisys/include/sparstop.h b/kernel/drivers/staging/unisys/include/sparstop.h new file mode 100644 index 000000000..05837399a --- /dev/null +++ b/kernel/drivers/staging/unisys/include/sparstop.h @@ -0,0 +1,30 @@ +/* sparstop.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ */ + +#ifndef __SPARSTOP_H__ +#define __SPARSTOP_H__ + +#include "timskmod.h" +#include "version.h" +#include + +typedef void (*SPARSTOP_COMPLETE_FUNC) (void *context, int status); + +int sp_stop(void *context, SPARSTOP_COMPLETE_FUNC get_complete_func); +void test_remove_stop_device(void); + +#endif diff --git a/kernel/drivers/staging/unisys/include/timskmod.h b/kernel/drivers/staging/unisys/include/timskmod.h new file mode 100644 index 000000000..cde2494ad --- /dev/null +++ b/kernel/drivers/staging/unisys/include/timskmod.h @@ -0,0 +1,153 @@ +/* timskmod.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __TIMSKMOD_H__ +#define __TIMSKMOD_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* #define EXPORT_SYMTAB */ +#include +#include +#include +#include +#include +#include +#include + +/* #define DEBUG */ +#ifndef BOOL +#define BOOL int +#endif +#define FALSE 0 +#define TRUE 1 +#if !defined SUCCESS +#define SUCCESS 0 +#endif +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define STRUCTSEQUAL(x, y) (memcmp(&x, &y, sizeof(x)) == 0) +#ifndef HOSTADDRESS +#define HOSTADDRESS unsigned long long +#endif + +#define sizeofmember(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER)) +/** "Covered quotient" function */ +#define COVQ(v, d) (((v) + (d) - 1) / (d)) +#define SWAPPOINTERS(p1, p2) \ + do { \ + void *SWAPPOINTERS_TEMP = (void *)p1; \ + (void *)(p1) = (void *)(p2); \ + (void *)(p2) = SWAPPOINTERS_TEMP; \ + } while (0) + +#define WARNDRV(fmt, args...) LOGWRN(fmt, ## args) +#define SECUREDRV(fmt, args...) LOGWRN(fmt, ## args) + +#define PRINTKDEV(devname, fmt, args...) LOGINFDEV(devname, fmt, ## args) +#define TBDDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args) +#define HUHDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args) +#define ERRDEV(devname, fmt, args...) LOGERRDEV(devname, fmt, ## args) +#define ERRDEVX(devno, fmt, args...) LOGERRDEVX(devno, fmt, ## args) +#define WARNDEV(devname, fmt, args...) LOGWRNDEV(devname, fmt, ## args) +#define SECUREDEV(devname, fmt, args...) LOGWRNDEV(devname, fmt, ## args) +#define INFODEV(devname, fmt, args...) LOGINFDEV(devname, fmt, ## args) +#define INFODEVX(devno, fmt, args...) LOGINFDEVX(devno, fmt, ## args) + +/** Verifies the consistency of your PRIVATEDEVICEDATA structure using + * conventional "signature" fields: + *
<p>
+ * - sig1 should contain the size of the structure + * - sig2 should contain a pointer to the beginning of the structure + */ +#define DDLOOKSVALID(dd) \ + ((dd != NULL) && \ + ((dd)->sig1 == sizeof(PRIVATEDEVICEDATA)) && \ + ((dd)->sig2 == dd)) + +/** Verifies the consistency of your PRIVATEFILEDATA structure using + * conventional "signature" fields: + *
<p>
+ * - sig1 should contain the size of the structure + * - sig2 should contain a pointer to the beginning of the structure + */ +#define FDLOOKSVALID(fd) \ + ((fd != NULL) && \ + ((fd)->sig1 == sizeof(PRIVATEFILEDATA)) && \ + ((fd)->sig2 == fd)) + +/** Sleep for an indicated number of seconds (for use in kernel mode). + * x - the number of seconds to sleep. + */ +#define SLEEP(x) \ + do { __set_current_state(TASK_INTERRUPTIBLE); \ + schedule_timeout((x)*HZ); \ + } while (0) + +/** Sleep for an indicated number of jiffies (for use in kernel mode). + * x - the number of jiffies to sleep. + */ +#define SLEEPJIFFIES(x) \ + do { __set_current_state(TASK_INTERRUPTIBLE); \ + schedule_timeout(x); \ + } while (0) + +static inline struct cdev *cdev_alloc_init(struct module *owner, + const struct file_operations *fops) +{ + struct cdev *cdev = NULL; + + cdev = cdev_alloc(); + if (!cdev) + return NULL; + cdev->ops = fops; + cdev->owner = owner; + + /* Note that the memory allocated for cdev will be deallocated + * when the usage count drops to 0, because it is controlled + * by a kobject of type ktype_cdev_dynamic. (This + * deallocation could very well happen outside of our kernel + * module, like via the cdev_put in __fput() for example.) + */ + return cdev; +} + +extern int unisys_spar_platform; + +#endif diff --git a/kernel/drivers/staging/unisys/include/uisqueue.h b/kernel/drivers/staging/unisys/include/uisqueue.h new file mode 100644 index 000000000..08ba16ea8 --- /dev/null +++ b/kernel/drivers/staging/unisys/include/uisqueue.h @@ -0,0 +1,396 @@ +/* uisqueue.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * Unisys IO Virtualization header NOTE: This file contains only Linux + * specific structs. All OS-independent structs are in iochannel.h.xx + */ + +#ifndef __UISQUEUE_H__ +#define __UISQUEUE_H__ + +#include "linux/version.h" +#include "iochannel.h" +#include +#include +#include + +#include "controlvmchannel.h" +#include "controlvmcompletionstatus.h" + +struct uisqueue_info { + struct channel_header __iomem *chan; + /* channel containing queues in which scsi commands & + * responses are queued + */ + u64 packets_sent; + u64 packets_received; + u64 interrupts_sent; + u64 interrupts_received; + u64 max_not_empty_cnt; + u64 total_wakeup_cnt; + u64 non_empty_wakeup_cnt; + + struct { + struct signal_queue_header reserved1; /* */ + struct signal_queue_header reserved2; /* */ + } safe_uis_queue; + unsigned int (*send_int_if_needed)(struct uisqueue_info *info, + unsigned int whichcqueue, + unsigned char issue_irq_if_empty, + u64 irq_handle, + unsigned char io_termination); +}; + +/* uisqueue_put_cmdrsp_with_lock_client queues a commmand or response + * to the specified queue, at the tail if the queue is full but + * oktowait == 0, then it return 0 indicating failure. otherwise it + * wait for the queue to become non-full. If command is queued, return + * 1 for success. 
+ */ +#define DONT_ISSUE_INTERRUPT 0 +#define ISSUE_INTERRUPT 1 + +#define DONT_WAIT 0 +#define OK_TO_WAIT 1 +#define UISLIB_LOCK_PREFIX \ + ".section .smp_locks,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661f\n" /* address */ \ + ".previous\n" \ + "661:\n\tlock; " + +unsigned long long uisqueue_interlocked_or(unsigned long long __iomem *tgt, + unsigned long long set); +unsigned long long uisqueue_interlocked_and(unsigned long long __iomem *tgt, + unsigned long long set); + +int uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo, + struct uiscmdrsp *cmdrsp, + unsigned int queue, + void *insertlock, + unsigned char issue_irq_if_empty, + u64 irq_handle, + char oktowait, + u8 *channel_id); + +/* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue + * and copies it to the area pointed by cmdrsp param. + * returns 0 if queue is empty, 1 otherwise + */ +int + +uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo, void *cmdrsp, + unsigned int queue); + +#define MAX_NAME_SIZE_UISQUEUE 64 + +struct extport_info { + u8 valid:1; + /* if 1, indicates this extport slot is occupied + * if 0, indicates that extport slot is unoccupied */ + + u32 num_devs_using; + /* When extport is added, this is set to 0. For exports + * located in NETWORK switches: + * Each time a VNIC, i.e., intport, is added to the switch this + * is used to assign a pref_pnic for the VNIC and when assigned + * to a VNIC this counter is incremented. When a VNIC is + * deleted, the extport corresponding to the VNIC's pref_pnic + * is located and its num_devs_using is decremented. For VNICs, + * num_devs_using is basically used to load-balance transmit + * traffic from VNICs. + */ + + struct switch_info *swtch; + struct pci_id pci_id; + char name[MAX_NAME_SIZE_UISQUEUE]; + union { + struct vhba_wwnn wwnn; + unsigned char macaddr[MAX_MACADDR_LEN]; + }; +}; + +struct device_info { + void __iomem *chanptr; + u64 channel_addr; + u64 channel_bytes; + uuid_le channel_uuid; + uuid_le instance_uuid; + struct irq_info intr; + struct switch_info *swtch; + char devid[30]; /* "vbus:dev" */ + u16 polling; + struct semaphore interrupt_callback_lock; + u32 bus_no; + u32 dev_no; + int (*interrupt)(void *); + void *interrupt_context; + void *private_data; + struct list_head list_polling_device_channels; + unsigned long long moved_to_tail_cnt; + unsigned long long first_busy_cnt; + unsigned long long last_on_list_cnt; +}; + +enum switch_type { + RECOVERY_LAN = 1, + IB_LAN = 2 +}; + +struct bus_info { + u32 bus_no, device_count; + struct device_info **device; + u64 guest_handle, recv_bus_irq_handle; + uuid_le bus_inst_uuid; + struct ultra_vbus_channel_protocol __iomem *bus_channel; + int bus_channel_bytes; + struct proc_dir_entry *proc_dir; /* proc/uislib/vbus/ */ + struct proc_dir_entry *proc_info; /* proc/uislib/vbus//info */ + char name[25]; + char partition_name[99]; + struct bus_info *next; + u8 local_vnic; /* 1 if local vnic created internally + * by IOVM; 0 otherwise... */ +}; + +struct sn_list_entry { + struct uisscsi_dest pdest; /* scsi bus, target, lun for + * phys disk */ + u8 sernum[MAX_SERIAL_NUM]; /* serial num of physical + * disk.. The length is always + * MAX_SERIAL_NUM, padded with + * spaces */ + struct sn_list_entry *next; +}; + +/* + * IO messages sent to UisnicControlChanFunc & UissdControlChanFunc by + * code that processes the ControlVm channel messages. 
+ */ + +enum iopart_msg_type { + IOPART_ADD_VNIC, + IOPART_DEL_VNIC, + IOPART_DEL_ALL_VNICS, + IOPART_ADD_VHBA, + IOPART_ADD_VDISK, + IOPART_DEL_VHBA, + IOPART_DEL_VDISK, + IOPART_DEL_ALL_VDISKS_FOR_VHBA, + IOPART_DEL_ALL_VHBAS, + IOPART_ATTACH_PHBA, + IOPART_DETACH_PHBA, /* 10 */ + IOPART_ATTACH_PNIC, + IOPART_DETACH_PNIC, + IOPART_DETACH_VHBA, + IOPART_DETACH_VNIC, + IOPART_PAUSE_VDISK, + IOPART_RESUME_VDISK, + IOPART_ADD_DEVICE, /* add generic device */ + IOPART_DEL_DEVICE, /* del generic device */ +}; + +struct add_virt_iopart { + void *chanptr; /* pointer to data channel */ + u64 guest_handle; /* used to convert guest physical + * address to real physical address + * for DMA, for ex. */ + u64 recv_bus_irq_handle; /* used to register to receive + * bus level interrupts. */ + struct irq_info intr; /* contains recv & send + * interrupt info */ + /* recvInterruptHandle is used to register to receive + * interrupts on the data channel. Used by GuestLinux/Windows + * IO drivers to connect to interrupt. sendInterruptHandle is + * used by IOPart drivers as parameter to + * Issue_VMCALL_IO_QUEUE_TRANSITION to interrupt thread in + * guest linux/windows IO drivers when data channel queue for + * vhba/vnic goes from EMPTY to NON-EMPTY. */ + struct switch_info *swtch; /* pointer to the virtual + * switch to which the vnic is + * connected */ + + u8 use_g2g_copy; /* Used to determine if a virtual HBA + * needs to use G2G copy. */ + u8 filler[7]; + + u32 bus_no; + u32 dev_no; + char *params; + ulong params_bytes; + +}; + +struct add_vdisk_iopart { + void *chanptr; /* pointer to data channel */ + int implicit; + struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */ + struct uisscsi_dest pdest; /* scsi bus, target, lun for phys disk */ + u8 sernum[MAX_SERIAL_NUM]; /* serial num of physical disk */ + u32 serlen; /* length of serial num */ +}; + +struct del_vdisk_iopart { + void *chanptr; /* pointer to data channel */ + struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */ +}; + +struct del_virt_iopart { + void *chanptr; /* pointer to data channel */ +}; + +struct det_virt_iopart { /* detach internal port */ + void *chanptr; /* pointer to data channel */ + struct switch_info *swtch; +}; + +struct paures_vdisk_iopart { + void *chanptr; /* pointer to data channel */ + struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */ +}; + +struct add_switch_iopart { /* add switch */ + struct switch_info *swtch; + char *params; + ulong params_bytes; +}; + +struct del_switch_iopart { /* destroy switch */ + struct switch_info *swtch; +}; + +struct io_msgs { + enum iopart_msg_type msgtype; + + /* additional params needed by some messages */ + union { + struct add_virt_iopart add_vhba; + struct add_virt_iopart add_vnic; + struct add_vdisk_iopart add_vdisk; + struct del_virt_iopart del_vhba; + struct del_virt_iopart del_vnic; + struct det_virt_iopart det_vhba; + struct det_virt_iopart det_vnic; + struct del_vdisk_iopart del_vdisk; + struct del_virt_iopart del_all_vdisks_for_vhba; + struct add_virt_iopart add_device; + struct del_virt_iopart del_device; + struct det_virt_iopart det_intport; + struct add_switch_iopart add_switch; + struct del_switch_iopart del_switch; + struct extport_info *ext_port; /* for attach or detach + * pnic/generic delete all + * vhbas/allvnics need no + * parameters */ + struct paures_vdisk_iopart paures_vdisk; + }; +}; + +/* +* Guest messages sent to VirtControlChanFunc by code that processes +* the ControlVm channel messages. 
+*/ + +enum guestpart_msg_type { + GUEST_ADD_VBUS, + GUEST_ADD_VHBA, + GUEST_ADD_VNIC, + GUEST_DEL_VBUS, + GUEST_DEL_VHBA, + GUEST_DEL_VNIC, + GUEST_DEL_ALL_VHBAS, + GUEST_DEL_ALL_VNICS, + GUEST_DEL_ALL_VBUSES, /* deletes all vhbas & vnics on all + * buses and deletes all buses */ + GUEST_PAUSE_VHBA, + GUEST_PAUSE_VNIC, + GUEST_RESUME_VHBA, + GUEST_RESUME_VNIC +}; + +struct add_vbus_guestpart { + void __iomem *chanptr; /* pointer to data channel for bus - + * NOT YET USED */ + u32 bus_no; /* bus number to be created/deleted */ + u32 dev_count; /* max num of devices on bus */ + uuid_le bus_uuid; /* indicates type of bus */ + uuid_le instance_uuid; /* instance guid for device */ +}; + +struct del_vbus_guestpart { + u32 bus_no; /* bus number to be deleted */ + /* once we start using the bus's channel, add can dump busNo + * into the channel header and then delete will need only one + * parameter, chanptr. */ +}; + +struct add_virt_guestpart { + void __iomem *chanptr; /* pointer to data channel */ + u32 bus_no; /* bus number for the operation */ + u32 device_no; /* number of device on the bus */ + uuid_le instance_uuid; /* instance guid for device */ + struct irq_info intr; /* recv/send interrupt info */ + /* recvInterruptHandle contains info needed in order to + * register to receive interrupts on the data channel. + * sendInterruptHandle contains handle which is provided to + * monitor VMCALL that will cause an interrupt to be generated + * for the other end. + */ +}; + +struct pause_virt_guestpart { + void __iomem *chanptr; /* pointer to data channel */ +}; + +struct resume_virt_guestpart { + void __iomem *chanptr; /* pointer to data channel */ +}; + +struct del_virt_guestpart { + void __iomem *chanptr; /* pointer to data channel */ +}; + +struct init_chipset_guestpart { + u32 bus_count; /* indicates the max number of busses */ + u32 switch_count; /* indicates the max number of switches */ +}; + +struct guest_msgs { + enum guestpart_msg_type msgtype; + + /* additional params needed by messages */ + union { + struct add_vbus_guestpart add_vbus; + struct add_virt_guestpart add_vhba; + struct add_virt_guestpart add_vnic; + struct pause_virt_guestpart pause_vhba; + struct pause_virt_guestpart pause_vnic; + struct resume_virt_guestpart resume_vhba; + struct resume_virt_guestpart resume_vnic; + struct del_vbus_guestpart del_vbus; + struct del_virt_guestpart del_vhba; + struct del_virt_guestpart del_vnic; + struct del_vbus_guestpart del_all_vhbas; + struct del_vbus_guestpart del_all_vnics; + /* del_all_vbuses needs no parameters */ + }; + struct init_chipset_guestpart init_chipset; + +}; + +#endif /* __UISQUEUE_H__ */ diff --git a/kernel/drivers/staging/unisys/include/uisthread.h b/kernel/drivers/staging/unisys/include/uisthread.h new file mode 100644 index 000000000..52c3eb4de --- /dev/null +++ b/kernel/drivers/staging/unisys/include/uisthread.h @@ -0,0 +1,42 @@ +/* uisthread.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ */ + +/*****************************************************************************/ +/* Unisys thread utilities header */ +/*****************************************************************************/ + +#ifndef __UISTHREAD_H__ +#define __UISTHREAD_H__ + +#include "linux/completion.h" + +struct uisthread_info { + struct task_struct *task; + int id; + struct completion has_stopped; +}; + +/* returns 0 for failure, 1 for success */ +int uisthread_start( + struct uisthread_info *thrinfo, + int (*threadfn)(void *), + void *thrcontext, + char *name); + +void uisthread_stop(struct uisthread_info *thrinfo); + +#endif /* __UISTHREAD_H__ */ diff --git a/kernel/drivers/staging/unisys/include/uisutils.h b/kernel/drivers/staging/unisys/include/uisutils.h new file mode 100644 index 000000000..c7d0ba8aa --- /dev/null +++ b/kernel/drivers/staging/unisys/include/uisutils.h @@ -0,0 +1,299 @@ +/* uisutils.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * Unisys Virtual HBA utilities header + */ + +#ifndef __UISUTILS__H__ +#define __UISUTILS__H__ +#include +#include +#include +#include +#include +#include + +#include "vmcallinterface.h" +#include "channel.h" +#include "uisthread.h" +#include "uisqueue.h" +#include "diagnostics/appos_subsystems.h" +#include "vbusdeviceinfo.h" +#include + +/* This is the MAGIC number stuffed by virthba in host->this_id. Used to + * identify virtual hbas. 
+ */ +#define UIS_MAGIC_VHBA 707 + +/* global function pointers that act as callback functions into + * uisnicmod, uissdmod, and virtpcimod + */ +extern int (*uisnic_control_chan_func)(struct io_msgs *); +extern int (*uissd_control_chan_func)(struct io_msgs *); +extern int (*virt_control_chan_func)(struct guest_msgs *); + +/* Return values of above callback functions: */ +#define CCF_ERROR 0 /* completed and failed */ +#define CCF_OK 1 /* completed successfully */ +#define CCF_PENDING 2 /* operation still pending */ +extern atomic_t uisutils_registered_services; + +struct req_handler_info { + uuid_le switch_uuid; + int (*controlfunc)(struct io_msgs *); + unsigned long min_channel_bytes; + int (*server_channel_ok)(unsigned long channel_bytes); + int (*server_channel_init)(void *x, unsigned char *client_str, + u32 client_str_len, u64 bytes); + char switch_type_name[99]; + struct list_head list_link; /* links into ReqHandlerInfo_list */ +}; + +struct req_handler_info *req_handler_find(uuid_le switch_uuid); + +#define uislib_ioremap_cache(addr, size) \ + dbg_ioremap_cache(addr, size, __FILE__, __LINE__) + +static inline void __iomem * +dbg_ioremap_cache(u64 addr, unsigned long size, char *file, int line) +{ + void __iomem *new; + + new = ioremap_cache(addr, size); + return new; +} + +#define uislib_ioremap(addr, size) dbg_ioremap(addr, size, __FILE__, __LINE__) + +static inline void * +dbg_ioremap(u64 addr, unsigned long size, char *file, int line) +{ + void *new; + + new = ioremap(addr, size); + return new; +} + +#define uislib_iounmap(addr) dbg_iounmap(addr, __FILE__, __LINE__) + +static inline void +dbg_iounmap(void __iomem *addr, char *file, int line) +{ + iounmap(addr); +} + +#define PROC_READ_BUFFER_SIZE 131072 /* size of the buffer to allocate to + * hold all of /proc/XXX/info */ +int uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining, + char *format, ...); + +int uisctrl_register_req_handler(int type, void *fptr, + struct ultra_vbus_deviceinfo *chipset_driver_info); + +unsigned char *util_map_virt(struct phys_info *sg); +void util_unmap_virt(struct phys_info *sg); +unsigned char *util_map_virt_atomic(struct phys_info *sg); +void util_unmap_virt_atomic(void *buf); +int uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid, + u64 channel_addr, ulong n_channel_bytes); +int uislib_client_inject_del_bus(u32 bus_no); + +int uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no, + u64 phys_chan_addr, u32 chan_bytes, + int is_test_addr, uuid_le inst_uuid, + struct irq_info *intr); +int uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no); +int uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no); +int uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no); +int uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no, + u64 phys_chan_addr, u32 chan_bytes, + int is_test_addr, uuid_le inst_uuid, + struct irq_info *intr); +int uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no); +int uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no); +int uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no); +#ifdef STORAGE_CHANNEL +u64 uislib_storage_channel(int client_id); +#endif +int uislib_get_owned_pdest(struct uisscsi_dest *pdest); + +int uislib_send_event(enum controlvm_id id, + struct controlvm_message_packet *event); + +/* structure used by vhba & vnic to keep track of queue & thread info */ +struct chaninfo { + struct uisqueue_info *queueinfo; + /* this specifies the queue structures for a channel */ + /* ALLOCATED BY THE OTHER END - WE JUST GET A POINTER TO THE 
MEMORY */ + spinlock_t insertlock; + /* currently used only in virtnic when sending data to uisnic */ + /* to synchronize the inserts into the signal queue */ + struct uisthread_info threadinfo; + /* this specifies the thread structures used by the thread that */ + /* handles this channel */ +}; + +/* this is the wait code for all the threads - it is used to get +* something from a queue choices: wait_for_completion_interruptible, +* _timeout, interruptible_timeout +*/ +#define UIS_THREAD_WAIT_MSEC(x) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + schedule_timeout(msecs_to_jiffies(x)); \ +} + +#define UIS_THREAD_WAIT_USEC(x) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + schedule_timeout(usecs_to_jiffies(x)); \ +} + +#define UIS_THREAD_WAIT UIS_THREAD_WAIT_MSEC(5) + +#define UIS_THREAD_WAIT_SEC(x) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + schedule_timeout((x)*HZ); \ +} + +/* This is a hack until we fix IOVM to initialize the channel header + * correctly at DEVICE_CREATE time, INSTEAD OF waiting until + * DEVICE_CONFIGURE time. + */ +static inline void +wait_for_valid_guid(uuid_le __iomem *guid) +{ + uuid_le tmpguid; + + while (1) { + memcpy_fromio((void *)&tmpguid, + (void __iomem *)guid, sizeof(uuid_le)); + if (uuid_le_cmp(tmpguid, NULL_UUID_LE) != 0) + break; + UIS_THREAD_WAIT_SEC(5); + } +} + +static inline unsigned int +issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes) +{ + struct vmcall_io_controlvm_addr_params params; + int result = VMCALL_SUCCESS; + u64 physaddr; + + physaddr = virt_to_phys(&params); + ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result); + if (VMCALL_SUCCESSFUL(result)) { + *control_addr = params.address; + *control_bytes = params.channel_bytes; + } + return result; +} + +static inline unsigned int issue_vmcall_io_diag_addr(u64 *diag_channel_addr) +{ + struct vmcall_io_diag_addr_params params; + int result = VMCALL_SUCCESS; + u64 physaddr; + + physaddr = virt_to_phys(&params); + ISSUE_IO_VMCALL(VMCALL_IO_DIAG_ADDR, physaddr, result); + if (VMCALL_SUCCESSFUL(result)) + *diag_channel_addr = params.address; + return result; +} + +static inline unsigned int issue_vmcall_io_visorserial_addr(u64 *channel_addr) +{ + struct vmcall_io_visorserial_addr_params params; + int result = VMCALL_SUCCESS; + u64 physaddr; + + physaddr = virt_to_phys(&params); + ISSUE_IO_VMCALL(VMCALL_IO_VISORSERIAL_ADDR, physaddr, result); + if (VMCALL_SUCCESSFUL(result)) + *channel_addr = params.address; + return result; +} + +static inline s64 issue_vmcall_query_guest_virtual_time_offset(void) +{ + u64 result = VMCALL_SUCCESS; + u64 physaddr = 0; + + ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr, + result); + return result; +} + +struct log_info_t { + unsigned long long last_cycles; + unsigned long long delta_sum[64]; + unsigned long long delta_cnt[64]; + unsigned long long max_delta[64]; + unsigned long long min_delta[64]; +}; + +static inline int issue_vmcall_update_physical_time(u64 adjustment) +{ + int result = VMCALL_SUCCESS; + + ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result); + return result; +} + +static inline unsigned int issue_vmcall_channel_mismatch(const char *chname, + const char *item_name, u32 line_no, + const char *path_n_fn) +{ + struct vmcall_channel_version_mismatch_params params; + int result = VMCALL_SUCCESS; + u64 physaddr; + char *last_slash = NULL; + + strlcpy(params.chname, chname, sizeof(params.chname)); + strlcpy(params.item_name, item_name, sizeof(params.item_name)); + params.line_no = line_no; + + last_slash
= strrchr(path_n_fn, '/'); + if (last_slash != NULL) { + last_slash++; + strlcpy(params.file_name, last_slash, sizeof(params.file_name)); + } else + strlcpy(params.file_name, + "Cannot determine source filename", + sizeof(params.file_name)); + + physaddr = virt_to_phys(&params); + ISSUE_IO_VMCALL(VMCALL_CHANNEL_VERSION_MISMATCH, physaddr, result); + return result; +} + +#define UIS_DAEMONIZE(nam) +void *uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln); +#define UISCACHEALLOC(cur_pool) uislib_cache_alloc(cur_pool, __FILE__, __LINE__) +void uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln); +#define UISCACHEFREE(cur_pool, p) \ + uislib_cache_free(cur_pool, p, __FILE__, __LINE__) + +void uislib_enable_channel_interrupts(u32 bus_no, u32 dev_no, + int (*interrupt)(void *), + void *interrupt_context); +void uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no); +void uislib_force_channel_interrupt(u32 bus_no, u32 dev_no); + +#endif /* __UISUTILS__H__ */ diff --git a/kernel/drivers/staging/unisys/include/vbushelper.h b/kernel/drivers/staging/unisys/include/vbushelper.h new file mode 100644 index 000000000..84abe5f99 --- /dev/null +++ b/kernel/drivers/staging/unisys/include/vbushelper.h @@ -0,0 +1,47 @@ +/* vbushelper.h + * + * Copyright (C) 2011 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __VBUSHELPER_H__ +#define __VBUSHELPER_H__ + +#include "vbusdeviceinfo.h" + +/* TARGET_HOSTNAME specified as -DTARGET_HOSTNAME=\"thename\" on the + * command line */ + +#define TARGET_HOSTNAME "linuxguest" + +static inline void bus_device_info_init( + struct ultra_vbus_deviceinfo *bus_device_info_ptr, + const char *dev_type, const char *drv_name, + const char *ver, const char *ver_tag) +{ + memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo)); + snprintf(bus_device_info_ptr->devtype, + sizeof(bus_device_info_ptr->devtype), + "%s", (dev_type) ? dev_type : "unknownType"); + snprintf(bus_device_info_ptr->drvname, + sizeof(bus_device_info_ptr->drvname), + "%s", (drv_name) ? drv_name : "unknownDriver"); + snprintf(bus_device_info_ptr->infostrs, + sizeof(bus_device_info_ptr->infostrs), "%s\t%s\t%s", + (ver) ? ver : "unknownVer", + (ver_tag) ? ver_tag : "unknownVerTag", + TARGET_HOSTNAME); +} + +#endif diff --git a/kernel/drivers/staging/unisys/uislib/Kconfig b/kernel/drivers/staging/unisys/uislib/Kconfig new file mode 100644 index 000000000..c39a0a21a --- /dev/null +++ b/kernel/drivers/staging/unisys/uislib/Kconfig @@ -0,0 +1,10 @@ +# +# Unisys uislib configuration +# + +config UNISYS_UISLIB + tristate "Unisys uislib driver" + select UNISYS_VISORCHIPSET + ---help--- + If you say Y here, you will enable the Unisys uislib driver.
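A minimal caller-side sketch for the bus_device_info_init() helper from vbushelper.h above (illustrative only, not part of the imported source; it assumes vbushelper.h is included, that the ultra_vbus_deviceinfo layout comes from vbusdeviceinfo.h, and the device-type, driver-name and version strings are made-up placeholders):

	static struct ultra_vbus_deviceinfo example_devinfo;

	static void example_report_driver_info(void)
	{
		/* NULL arguments fall back to the "unknown..." defaults;
		 * infostrs ends up as "<ver>\t<ver_tag>\t<TARGET_HOSTNAME>". */
		bus_device_info_init(&example_devinfo, "vhba", "example_virthba",
				     "1.0", "rc1");
	}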
+ diff --git a/kernel/drivers/staging/unisys/uislib/Makefile b/kernel/drivers/staging/unisys/uislib/Makefile new file mode 100644 index 000000000..860f494f1 --- /dev/null +++ b/kernel/drivers/staging/unisys/uislib/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for Unisys uislib +# + +obj-$(CONFIG_UNISYS_UISLIB) += visoruislib.o + +visoruislib-y := uislib.o uisqueue.o uisthread.o uisutils.o + +ccflags-y += -Idrivers/staging/unisys/include +ccflags-y += -Idrivers/staging/unisys/visorchipset +ccflags-y += -Idrivers/staging/unisys/common-spar/include +ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels diff --git a/kernel/drivers/staging/unisys/uislib/uislib.c b/kernel/drivers/staging/unisys/uislib/uislib.c new file mode 100644 index 000000000..f93d0bb11 --- /dev/null +++ b/kernel/drivers/staging/unisys/uislib/uislib.c @@ -0,0 +1,1372 @@ +/* uislib.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* @ALL_INSPECTED */ +#define EXPORT_SYMTAB +#include +#include +#ifdef CONFIG_MODVERSIONS +#include +#endif +#include +#include + +#include +#include + +#include +#include "diagnostics/appos_subsystems.h" +#include "uisutils.h" +#include "vbuschannel.h" + +#include +#include /* for copy_from_user */ +#include /* for toupper */ +#include + +#include "sparstop.h" +#include "visorchipset.h" +#include "version.h" +#include "guestlinuxdebug.h" + +#define SET_PROC_OWNER(x, y) + +#define POLLJIFFIES_NORMAL 1 +/* Choose whether or not you want to wakeup the request-polling thread + * after an IO termination: + * this is shorter than using __FILE__ (full path name) in + * debug/info/error messages + */ +#define CURRENT_FILE_PC UISLIB_PC_uislib_c +#define __MYFILE__ "uislib.c" + +/* global function pointers that act as callback functions into virtpcimod */ +int (*virt_control_chan_func)(struct guest_msgs *); + +static int debug_buf_valid; +static char *debug_buf; /* Note this MUST be global, + * because the contents must */ +static unsigned int chipset_inited; + +#define WAIT_ON_CALLBACK(handle) \ + do { \ + if (handle) \ + break; \ + UIS_THREAD_WAIT; \ + } while (1) + +static struct bus_info *bus_list; +static rwlock_t bus_list_lock; +static int bus_list_count; /* number of buses in the list */ +static int max_bus_count; /* maximum number of buses expected */ +static u64 phys_data_chan; +static int platform_no; + +static struct uisthread_info incoming_ti; +static BOOL incoming_started = FALSE; +static LIST_HEAD(poll_dev_chan); +static unsigned long long tot_moved_to_tail_cnt; +static unsigned long long tot_wait_cnt; +static unsigned long long tot_wakeup_cnt; +static unsigned long long tot_schedule_cnt; +static int en_smart_wakeup = 1; +static DEFINE_SEMAPHORE(poll_dev_lock); /* unlocked */ +static DECLARE_WAIT_QUEUE_HEAD(poll_dev_wake_q); +static int poll_dev_start; + +#define CALLHOME_PROC_ENTRY_FN "callhome" +#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled" + +#define DIR_DEBUGFS_ENTRY "uislib" +static struct 
dentry *dir_debugfs; + +#define PLATFORMNUMBER_DEBUGFS_ENTRY_FN "platform" +static struct dentry *platformnumber_debugfs_read; + +#define CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN "cycles_before_wait" +static struct dentry *cycles_before_wait_debugfs_read; + +#define SMART_WAKEUP_DEBUGFS_ENTRY_FN "smart_wakeup" +static struct dentry *smart_wakeup_debugfs_entry; + +#define INFO_DEBUGFS_ENTRY_FN "info" +static struct dentry *info_debugfs_entry; + +static unsigned long long cycles_before_wait, wait_cycles; + +/*****************************************************/ +/* local functions */ +/*****************************************************/ + +static ssize_t info_debugfs_read(struct file *file, char __user *buf, + size_t len, loff_t *offset); +static const struct file_operations debugfs_info_fops = { + .read = info_debugfs_read, +}; + +static void +init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr) +{ + memset(msg, 0, sizeof(struct controlvm_message)); + msg->hdr.id = id; + msg->hdr.flags.response_expected = rsp; + msg->hdr.flags.server = svr; +} + +static __iomem void *init_vbus_channel(u64 ch_addr, u32 ch_bytes) +{ + void __iomem *ch = uislib_ioremap_cache(ch_addr, ch_bytes); + + if (!ch) + return NULL; + + if (!SPAR_VBUS_CHANNEL_OK_CLIENT(ch)) { + uislib_iounmap(ch); + return NULL; + } + return ch; +} + +static int +create_bus(struct controlvm_message *msg, char *buf) +{ + u32 bus_no, dev_count; + struct bus_info *tmp, *bus; + size_t size; + + if (max_bus_count == bus_list_count) { + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, max_bus_count, + POSTCODE_SEVERITY_ERR); + return CONTROLVM_RESP_ERROR_MAX_BUSES; + } + + bus_no = msg->cmd.create_bus.bus_no; + dev_count = msg->cmd.create_bus.dev_count; + + POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, bus_no, dev_count, + POSTCODE_SEVERITY_INFO); + + size = + sizeof(struct bus_info) + + (dev_count * sizeof(struct device_info *)); + bus = kzalloc(size, GFP_ATOMIC); + if (!bus) { + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, + POSTCODE_SEVERITY_ERR); + return CONTROLVM_RESP_ERROR_KMALLOC_FAILED; + } + + /* Currently by default, the bus Number is the GuestHandle. + * Configure Bus message can override this. + */ + if (msg->hdr.flags.test_message) { + /* This implies we're the IOVM so set guest handle to 0... 
*/ + bus->guest_handle = 0; + bus->bus_no = bus_no; + bus->local_vnic = 1; + } else { + bus->bus_no = bus_no; + bus->guest_handle = bus_no; + } + sprintf(bus->name, "%d", (int)bus->bus_no); + bus->device_count = dev_count; + bus->device = + (struct device_info **)((char *)bus + sizeof(struct bus_info)); + bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid; + bus->bus_channel_bytes = 0; + bus->bus_channel = NULL; + + /* add bus to our bus list - but check for duplicates first */ + read_lock(&bus_list_lock); + for (tmp = bus_list; tmp; tmp = tmp->next) { + if (tmp->bus_no == bus->bus_no) + break; + } + read_unlock(&bus_list_lock); + if (tmp) { + /* found a bus already in the list with same bus_no - + * reject add + */ + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no, + POSTCODE_SEVERITY_ERR); + kfree(bus); + return CONTROLVM_RESP_ERROR_ALREADY_DONE; + } + if ((msg->cmd.create_bus.channel_addr != 0) && + (msg->cmd.create_bus.channel_bytes != 0)) { + bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes; + bus->bus_channel = + init_vbus_channel(msg->cmd.create_bus.channel_addr, + msg->cmd.create_bus.channel_bytes); + } + /* the msg is bound for virtpci; send guest_msgs struct to callback */ + if (!msg->hdr.flags.server) { + struct guest_msgs cmd; + + cmd.msgtype = GUEST_ADD_VBUS; + cmd.add_vbus.bus_no = bus_no; + cmd.add_vbus.chanptr = bus->bus_channel; + cmd.add_vbus.dev_count = dev_count; + cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid; + cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid; + if (!virt_control_chan_func) { + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no, + POSTCODE_SEVERITY_ERR); + kfree(bus); + return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE; + } + if (!virt_control_chan_func(&cmd)) { + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no, + POSTCODE_SEVERITY_ERR); + kfree(bus); + return + CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR; + } + } + + /* add bus at the head of our list */ + write_lock(&bus_list_lock); + if (!bus_list) { + bus_list = bus; + } else { + bus->next = bus_list; + bus_list = bus; + } + bus_list_count++; + write_unlock(&bus_list_lock); + + POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no, + POSTCODE_SEVERITY_INFO); + return CONTROLVM_RESP_SUCCESS; +} + +static int +destroy_bus(struct controlvm_message *msg, char *buf) +{ + int i; + struct bus_info *bus, *prev = NULL; + struct guest_msgs cmd; + u32 bus_no; + + bus_no = msg->cmd.destroy_bus.bus_no; + + read_lock(&bus_list_lock); + + bus = bus_list; + while (bus) { + if (bus->bus_no == bus_no) + break; + prev = bus; + bus = bus->next; + } + + if (!bus) { + read_unlock(&bus_list_lock); + return CONTROLVM_RESP_ERROR_ALREADY_DONE; + } + + /* verify that this bus has no devices. */ + for (i = 0; i < bus->device_count; i++) { + if (bus->device[i]) { + read_unlock(&bus_list_lock); + return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED; + } + } + read_unlock(&bus_list_lock); + + if (msg->hdr.flags.server) + goto remove; + + /* client messages require us to call the virtpci callback associated + with this bus. 
*/ + cmd.msgtype = GUEST_DEL_VBUS; + cmd.del_vbus.bus_no = bus_no; + if (!virt_control_chan_func) + return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE; + + if (!virt_control_chan_func(&cmd)) + return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR; + + /* finally, remove the bus from the list */ +remove: + write_lock(&bus_list_lock); + if (prev) /* not at head */ + prev->next = bus->next; + else + bus_list = bus->next; + bus_list_count--; + write_unlock(&bus_list_lock); + + if (bus->bus_channel) { + uislib_iounmap(bus->bus_channel); + bus->bus_channel = NULL; + } + + kfree(bus); + return CONTROLVM_RESP_SUCCESS; +} + +static int create_device(struct controlvm_message *msg, char *buf) +{ + struct device_info *dev; + struct bus_info *bus; + struct guest_msgs cmd; + u32 bus_no, dev_no; + int result = CONTROLVM_RESP_SUCCESS; + u64 min_size = MIN_IO_CHANNEL_SIZE; + struct req_handler_info *req_handler; + + bus_no = msg->cmd.create_device.bus_no; + dev_no = msg->cmd.create_device.dev_no; + + POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, + POSTCODE_SEVERITY_INFO); + + dev = kzalloc(sizeof(*dev), GFP_ATOMIC); + if (!dev) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + return CONTROLVM_RESP_ERROR_KMALLOC_FAILED; + } + + dev->channel_uuid = msg->cmd.create_device.data_type_uuid; + dev->intr = msg->cmd.create_device.intr; + dev->channel_addr = msg->cmd.create_device.channel_addr; + dev->bus_no = bus_no; + dev->dev_no = dev_no; + sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */ + sprintf(dev->devid, "vbus%u:dev%u", (unsigned)bus_no, (unsigned)dev_no); + /* map the channel memory for the device. */ + if (msg->hdr.flags.test_message) { + dev->chanptr = (void __iomem *)__va(dev->channel_addr); + } else { + req_handler = req_handler_find(dev->channel_uuid); + if (req_handler) + /* generic service handler registered for this + * channel + */ + min_size = req_handler->min_channel_bytes; + if (min_size > msg->cmd.create_device.channel_bytes) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, + bus_no, POSTCODE_SEVERITY_ERR); + result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL; + goto cleanup; + } + dev->chanptr = + uislib_ioremap_cache(dev->channel_addr, + msg->cmd.create_device.channel_bytes); + if (!dev->chanptr) { + result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED; + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, + bus_no, POSTCODE_SEVERITY_ERR); + goto cleanup; + } + } + dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid; + dev->channel_bytes = msg->cmd.create_device.channel_bytes; + + read_lock(&bus_list_lock); + for (bus = bus_list; bus; bus = bus->next) { + if (bus->bus_no != bus_no) + continue; + /* make sure the device number is valid */ + if (dev_no >= bus->device_count) { + result = CONTROLVM_RESP_ERROR_MAX_DEVICES; + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, + bus_no, POSTCODE_SEVERITY_ERR); + read_unlock(&bus_list_lock); + goto cleanup; + } + /* make sure this device is not already set */ + if (bus->device[dev_no]) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, + dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + result = CONTROLVM_RESP_ERROR_ALREADY_DONE; + read_unlock(&bus_list_lock); + goto cleanup; + } + read_unlock(&bus_list_lock); + /* the msg is bound for virtpci; send + * guest_msgs struct to callback + */ + if (msg->hdr.flags.server) { + bus->device[dev_no] = dev; + POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, + bus_no, POSTCODE_SEVERITY_INFO); + return CONTROLVM_RESP_SUCCESS; + } + if 
(uuid_le_cmp(dev->channel_uuid, + spar_vhba_channel_protocol_uuid) == 0) { + wait_for_valid_guid(&((struct channel_header __iomem *) + (dev->chanptr))->chtype); + if (!SPAR_VHBA_CHANNEL_OK_CLIENT(dev->chanptr)) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, + dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID; + goto cleanup; + } + cmd.msgtype = GUEST_ADD_VHBA; + cmd.add_vhba.chanptr = dev->chanptr; + cmd.add_vhba.bus_no = bus_no; + cmd.add_vhba.device_no = dev_no; + cmd.add_vhba.instance_uuid = dev->instance_uuid; + cmd.add_vhba.intr = dev->intr; + } else if (uuid_le_cmp(dev->channel_uuid, + spar_vnic_channel_protocol_uuid) == 0) { + wait_for_valid_guid(&((struct channel_header __iomem *) + (dev->chanptr))->chtype); + if (!SPAR_VNIC_CHANNEL_OK_CLIENT(dev->chanptr)) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, + dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID; + goto cleanup; + } + cmd.msgtype = GUEST_ADD_VNIC; + cmd.add_vnic.chanptr = dev->chanptr; + cmd.add_vnic.bus_no = bus_no; + cmd.add_vnic.device_no = dev_no; + cmd.add_vnic.instance_uuid = dev->instance_uuid; + cmd.add_vhba.intr = dev->intr; + } else { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, + bus_no, POSTCODE_SEVERITY_ERR); + result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN; + goto cleanup; + } + + if (!virt_control_chan_func) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, + bus_no, POSTCODE_SEVERITY_ERR); + result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE; + goto cleanup; + } + + if (!virt_control_chan_func(&cmd)) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, + bus_no, POSTCODE_SEVERITY_ERR); + result = + CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR; + goto cleanup; + } + + bus->device[dev_no] = dev; + POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, + bus_no, POSTCODE_SEVERITY_INFO); + return CONTROLVM_RESP_SUCCESS; + } + read_unlock(&bus_list_lock); + + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + result = CONTROLVM_RESP_ERROR_BUS_INVALID; + +cleanup: + if (!msg->hdr.flags.test_message) { + uislib_iounmap(dev->chanptr); + dev->chanptr = NULL; + } + + kfree(dev); + return result; +} + +static int pause_device(struct controlvm_message *msg) +{ + u32 bus_no, dev_no; + struct bus_info *bus; + struct device_info *dev; + struct guest_msgs cmd; + int retval = CONTROLVM_RESP_SUCCESS; + + bus_no = msg->cmd.device_change_state.bus_no; + dev_no = msg->cmd.device_change_state.dev_no; + + read_lock(&bus_list_lock); + for (bus = bus_list; bus; bus = bus->next) { + if (bus->bus_no == bus_no) { + /* make sure the device number is valid */ + if (dev_no >= bus->device_count) { + retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID; + } else { + /* make sure this device exists */ + dev = bus->device[dev_no]; + if (!dev) { + retval = + CONTROLVM_RESP_ERROR_ALREADY_DONE; + } + } + break; + } + } + if (!bus) + retval = CONTROLVM_RESP_ERROR_BUS_INVALID; + + read_unlock(&bus_list_lock); + if (retval == CONTROLVM_RESP_SUCCESS) { + /* the msg is bound for virtpci; send + * guest_msgs struct to callback + */ + if (uuid_le_cmp(dev->channel_uuid, + spar_vhba_channel_protocol_uuid) == 0) { + cmd.msgtype = GUEST_PAUSE_VHBA; + cmd.pause_vhba.chanptr = dev->chanptr; + } else if (uuid_le_cmp(dev->channel_uuid, + spar_vnic_channel_protocol_uuid) == 0) { + cmd.msgtype = GUEST_PAUSE_VNIC; + cmd.pause_vnic.chanptr = dev->chanptr; + } else { + return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN; + } + if 
(!virt_control_chan_func) + return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE; + if (!virt_control_chan_func(&cmd)) { + return + CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR; + } + } + return retval; +} + +static int resume_device(struct controlvm_message *msg) +{ + u32 bus_no, dev_no; + struct bus_info *bus; + struct device_info *dev; + struct guest_msgs cmd; + int retval = CONTROLVM_RESP_SUCCESS; + + bus_no = msg->cmd.device_change_state.bus_no; + dev_no = msg->cmd.device_change_state.dev_no; + + read_lock(&bus_list_lock); + for (bus = bus_list; bus; bus = bus->next) { + if (bus->bus_no == bus_no) { + /* make sure the device number is valid */ + if (dev_no >= bus->device_count) { + retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID; + } else { + /* make sure this device exists */ + dev = bus->device[dev_no]; + if (!dev) { + retval = + CONTROLVM_RESP_ERROR_ALREADY_DONE; + } + } + break; + } + } + + if (!bus) + retval = CONTROLVM_RESP_ERROR_BUS_INVALID; + + read_unlock(&bus_list_lock); + /* the msg is bound for virtpci; send + * guest_msgs struct to callback + */ + if (retval == CONTROLVM_RESP_SUCCESS) { + if (uuid_le_cmp(dev->channel_uuid, + spar_vhba_channel_protocol_uuid) == 0) { + cmd.msgtype = GUEST_RESUME_VHBA; + cmd.resume_vhba.chanptr = dev->chanptr; + } else if (uuid_le_cmp(dev->channel_uuid, + spar_vnic_channel_protocol_uuid) == 0) { + cmd.msgtype = GUEST_RESUME_VNIC; + cmd.resume_vnic.chanptr = dev->chanptr; + } else { + return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN; + } + if (!virt_control_chan_func) + return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE; + if (!virt_control_chan_func(&cmd)) { + return + CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR; + } + } + return retval; +} + +static int destroy_device(struct controlvm_message *msg, char *buf) +{ + u32 bus_no, dev_no; + struct bus_info *bus; + struct device_info *dev; + struct guest_msgs cmd; + int retval = CONTROLVM_RESP_SUCCESS; + + bus_no = msg->cmd.destroy_device.bus_no; + dev_no = msg->cmd.destroy_device.bus_no; + + read_lock(&bus_list_lock); + for (bus = bus_list; bus; bus = bus->next) { + if (bus->bus_no == bus_no) { + /* make sure the device number is valid */ + if (dev_no >= bus->device_count) { + retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID; + } else { + /* make sure this device exists */ + dev = bus->device[dev_no]; + if (!dev) { + retval = + CONTROLVM_RESP_ERROR_ALREADY_DONE; + } + } + break; + } + } + + if (!bus) + retval = CONTROLVM_RESP_ERROR_BUS_INVALID; + read_unlock(&bus_list_lock); + if (retval == CONTROLVM_RESP_SUCCESS) { + /* the msg is bound for virtpci; send + * guest_msgs struct to callback + */ + if (uuid_le_cmp(dev->channel_uuid, + spar_vhba_channel_protocol_uuid) == 0) { + cmd.msgtype = GUEST_DEL_VHBA; + cmd.del_vhba.chanptr = dev->chanptr; + } else if (uuid_le_cmp(dev->channel_uuid, + spar_vnic_channel_protocol_uuid) == 0) { + cmd.msgtype = GUEST_DEL_VNIC; + cmd.del_vnic.chanptr = dev->chanptr; + } else { + return + CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN; + } + if (!virt_control_chan_func) { + return + CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE; + } + if (!virt_control_chan_func(&cmd)) { + return + CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR; + } +/* you must disable channel interrupts BEFORE you unmap the channel, + * because if you unmap first, there may still be some activity going + * on which accesses the channel and you will get a "unable to handle + * kernel paging request" + */ + if (dev->polling) + uislib_disable_channel_interrupts(bus_no, dev_no); + /* unmap the 
channel memory for the device. */ + if (!msg->hdr.flags.test_message) + uislib_iounmap(dev->chanptr); + kfree(dev); + bus->device[dev_no] = NULL; + } + return retval; +} + +static int +init_chipset(struct controlvm_message *msg, char *buf) +{ + POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO); + + max_bus_count = msg->cmd.init_chipset.bus_count; + platform_no = msg->cmd.init_chipset.platform_number; + phys_data_chan = 0; + + /* We need to make sure we have our functions registered + * before processing messages. If we are a test vehicle the + * test_message for init_chipset will be set. We can ignore the + * waits for the callbacks, since this will be manually entered + * from a user. If no test_message is set, we will wait for the + * functions. + */ + if (!msg->hdr.flags.test_message) + WAIT_ON_CALLBACK(virt_control_chan_func); + + chipset_inited = 1; + POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO); + + return CONTROLVM_RESP_SUCCESS; +} + +static int delete_bus_glue(u32 bus_no) +{ + struct controlvm_message msg; + + init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0); + msg.cmd.destroy_bus.bus_no = bus_no; + if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) + return 0; + return 1; +} + +static int delete_device_glue(u32 bus_no, u32 dev_no) +{ + struct controlvm_message msg; + + init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0); + msg.cmd.destroy_device.bus_no = bus_no; + msg.cmd.destroy_device.dev_no = dev_no; + if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) + return 0; + return 1; +} + +int +uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid, + u64 channel_addr, ulong n_channel_bytes) +{ + struct controlvm_message msg; + + /* step 0: init the chipset */ + POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO); + + if (!chipset_inited) { + /* step: initialize the chipset */ + init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0); + /* this change is needed so that console will come up + * OK even when the bus 0 create comes in late. If the + * bus 0 create is the first create, then the add_vnic + * will work fine, but if the bus 0 create arrives + * after number 4, then the add_vnic will fail, and the + * ultraboot will fail. 
+ */ + msg.cmd.init_chipset.bus_count = 23; + msg.cmd.init_chipset.switch_count = 0; + if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) + return 0; + POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, bus_no, + POSTCODE_SEVERITY_INFO); + } + + /* step 1: create a bus */ + POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, + POSTCODE_SEVERITY_WARNING); + init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0); + msg.cmd.create_bus.bus_no = bus_no; + msg.cmd.create_bus.dev_count = 23; /* devNo+1; */ + msg.cmd.create_bus.channel_addr = channel_addr; + msg.cmd.create_bus.channel_bytes = n_channel_bytes; + if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) { + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, + POSTCODE_SEVERITY_ERR); + return 0; + } + POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); + + return 1; +} +EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus); + +int +uislib_client_inject_del_bus(u32 bus_no) +{ + return delete_bus_glue(bus_no); +} +EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus); + +int +uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no) +{ + struct controlvm_message msg; + int rc; + + init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); + msg.cmd.device_change_state.bus_no = bus_no; + msg.cmd.device_change_state.dev_no = dev_no; + msg.cmd.device_change_state.state = segment_state_standby; + rc = pause_device(&msg); + if (rc != CONTROLVM_RESP_SUCCESS) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba); + +int +uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no) +{ + struct controlvm_message msg; + int rc; + + init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); + msg.cmd.device_change_state.bus_no = bus_no; + msg.cmd.device_change_state.dev_no = dev_no; + msg.cmd.device_change_state.state = segment_state_running; + rc = resume_device(&msg); + if (rc != CONTROLVM_RESP_SUCCESS) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba); + +int +uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no, + u64 phys_chan_addr, u32 chan_bytes, + int is_test_addr, uuid_le inst_uuid, + struct irq_info *intr) +{ + struct controlvm_message msg; + + /* chipset init'ed with bus bus has been previously created - + * Verify it still exists step 2: create the VHBA device on the + * bus + */ + POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, dev_no, bus_no, + POSTCODE_SEVERITY_INFO); + + init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0); + if (is_test_addr) + /* signify that the physical channel address does NOT + * need to be ioremap()ed + */ + msg.hdr.flags.test_message = 1; + msg.cmd.create_device.bus_no = bus_no; + msg.cmd.create_device.dev_no = dev_no; + msg.cmd.create_device.dev_inst_uuid = inst_uuid; + if (intr) + msg.cmd.create_device.intr = *intr; + else + memset(&msg.cmd.create_device.intr, 0, + sizeof(struct irq_info)); + msg.cmd.create_device.channel_addr = phys_chan_addr; + if (chan_bytes < MIN_IO_CHANNEL_SIZE) { + POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes, + MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR); + return 0; + } + msg.cmd.create_device.channel_bytes = chan_bytes; + msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid; + if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) { + POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + return 0; + } + POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, dev_no, bus_no, + POSTCODE_SEVERITY_INFO); + return 1; +} +EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba); + +int +uislib_client_inject_del_vhba(u32 bus_no, 
u32 dev_no) +{ + return delete_device_glue(bus_no, dev_no); +} +EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba); + +int +uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no, + u64 phys_chan_addr, u32 chan_bytes, + int is_test_addr, uuid_le inst_uuid, + struct irq_info *intr) +{ + struct controlvm_message msg; + + /* chipset init'ed with bus bus has been previously created - + * Verify it still exists step 2: create the VNIC device on the + * bus + */ + POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, dev_no, bus_no, + POSTCODE_SEVERITY_INFO); + + init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0); + if (is_test_addr) + /* signify that the physical channel address does NOT + * need to be ioremap()ed + */ + msg.hdr.flags.test_message = 1; + msg.cmd.create_device.bus_no = bus_no; + msg.cmd.create_device.dev_no = dev_no; + msg.cmd.create_device.dev_inst_uuid = inst_uuid; + if (intr) + msg.cmd.create_device.intr = *intr; + else + memset(&msg.cmd.create_device.intr, 0, + sizeof(struct irq_info)); + msg.cmd.create_device.channel_addr = phys_chan_addr; + if (chan_bytes < MIN_IO_CHANNEL_SIZE) { + POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes, + MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR); + return 0; + } + msg.cmd.create_device.channel_bytes = chan_bytes; + msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid; + if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) { + POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + return 0; + } + + POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, dev_no, bus_no, + POSTCODE_SEVERITY_INFO); + return 1; +} +EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic); + +int +uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no) +{ + struct controlvm_message msg; + int rc; + + init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); + msg.cmd.device_change_state.bus_no = bus_no; + msg.cmd.device_change_state.dev_no = dev_no; + msg.cmd.device_change_state.state = segment_state_standby; + rc = pause_device(&msg); + if (rc != CONTROLVM_RESP_SUCCESS) + return -1; + return 0; +} +EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic); + +int +uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no) +{ + struct controlvm_message msg; + int rc; + + init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0); + msg.cmd.device_change_state.bus_no = bus_no; + msg.cmd.device_change_state.dev_no = dev_no; + msg.cmd.device_change_state.state = segment_state_running; + rc = resume_device(&msg); + if (rc != CONTROLVM_RESP_SUCCESS) + return -1; + return 0; +} +EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic); + +int +uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no) +{ + return delete_device_glue(bus_no, dev_no); +} +EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic); + +void * +uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln) +{ + /* __GFP_NORETRY means "ok to fail", meaning kmalloc() can + * return NULL. If you do NOT specify __GFP_NORETRY, Linux + * will go to extreme measures to get memory for you (like, + * invoke oom killer), which will probably cripple the system. 
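 * (Illustrative caller-side sketch, not part of the imported source: every
 * UISCACHEALLOC() user is expected to tolerate a NULL return, e.g.
 *	buf = UISCACHEALLOC(cmdrsp_pool);
 *	if (!buf)
 *		return 0;
 * where cmdrsp_pool names whatever kmem_cache the caller created.)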
+ */ + void *p = kmem_cache_alloc(cur_pool, GFP_ATOMIC | __GFP_NORETRY); + + if (!p) + return NULL; + return p; +} +EXPORT_SYMBOL_GPL(uislib_cache_alloc); + +void +uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln) +{ + if (!p) + return; + kmem_cache_free(cur_pool, p); +} +EXPORT_SYMBOL_GPL(uislib_cache_free); + +/*****************************************************/ +/* proc filesystem callback functions */ +/*****************************************************/ + +#define PLINE(...) uisutil_add_proc_line_ex(&tot, buff, \ + buff_len, __VA_ARGS__) + +static int +info_debugfs_read_helper(char **buff, int *buff_len) +{ + int i, tot = 0; + struct bus_info *bus; + + if (PLINE("\nBuses:\n") < 0) + goto err_done; + + read_lock(&bus_list_lock); + for (bus = bus_list; bus; bus = bus->next) { + if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n", + bus, bus->bus_no, bus->device_count) < 0) + goto err_done_unlock; + + if (PLINE(" Devices:\n") < 0) + goto err_done_unlock; + + for (i = 0; i < bus->device_count; i++) { + if (bus->device[i]) { + if (PLINE(" busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n", + bus->bus_no, i, bus->device[i], + bus->device[i]->chanptr, + bus->device[i]->swtch) < 0) + goto err_done_unlock; + + if (PLINE(" first_busy_cnt=%llu, moved_to_tail_cnt=%llu, last_on_list_cnt=%llu\n", + bus->device[i]->first_busy_cnt, + bus->device[i]->moved_to_tail_cnt, + bus->device[i]->last_on_list_cnt) < 0) + goto err_done_unlock; + } + } + } + read_unlock(&bus_list_lock); + + if (PLINE("UisUtils_Registered_Services: %d\n", + atomic_read(&uisutils_registered_services)) < 0) + goto err_done; + if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n", + cycles_before_wait, wait_cycles) < 0) + goto err_done; + if (PLINE("tot_wakeup_cnt %llu:tot_wait_cnt %llu:tot_schedule_cnt %llu\n", + tot_wakeup_cnt, tot_wait_cnt, tot_schedule_cnt) < 0) + goto err_done; + if (PLINE("en_smart_wakeup %d\n", en_smart_wakeup) < 0) + goto err_done; + if (PLINE("tot_moved_to_tail_cnt %llu\n", tot_moved_to_tail_cnt) < 0) + goto err_done; + + return tot; + +err_done_unlock: + read_unlock(&bus_list_lock); +err_done: + return -1; +} + +static ssize_t info_debugfs_read(struct file *file, char __user *buf, + size_t len, loff_t *offset) +{ + char *temp; + int total_bytes = 0; + int remaining_bytes = PROC_READ_BUFFER_SIZE; + +/* *start = buf; */ + if (!debug_buf) { + debug_buf = vmalloc(PROC_READ_BUFFER_SIZE); + + if (!debug_buf) + return -ENOMEM; + } + + temp = debug_buf; + + if ((*offset == 0) || (!debug_buf_valid)) { + /* if the read fails, then -1 will be returned */ + total_bytes = info_debugfs_read_helper(&temp, &remaining_bytes); + debug_buf_valid = 1; + } else { + total_bytes = strlen(debug_buf); + } + + return simple_read_from_buffer(buf, len, offset, + debug_buf, total_bytes); +} + +static struct device_info *find_dev(u32 bus_no, u32 dev_no) +{ + struct bus_info *bus; + struct device_info *dev = NULL; + + read_lock(&bus_list_lock); + for (bus = bus_list; bus; bus = bus->next) { + if (bus->bus_no == bus_no) { + /* make sure the device number is valid */ + if (dev_no >= bus->device_count) + break; + dev = bus->device[dev_no]; + break; + } + } + read_unlock(&bus_list_lock); + return dev; +} + +/* This thread calls the "interrupt" function for each device that has + * enabled such using uislib_enable_channel_interrupts(). The "interrupt" + * function typically reads and processes the devices's channel input + * queue. 
This thread repeatedly does this, until the thread is told to stop + * (via uisthread_stop()). Sleeping rules: + * - If we have called the "interrupt" function for all devices, and all of + * them have reported "nothing processed" (returned 0), then we will go to + * sleep for a maximum of POLLJIFFIES_NORMAL jiffies. + * - If anyone calls uislib_force_channel_interrupt(), the above jiffy + * sleep will be interrupted, and we will resume calling the "interrupt" + * function for all devices. + * - The list of devices is dynamically re-ordered in order to + * attempt to preserve fairness. Whenever we spin thru the list of + * devices and call the dev->interrupt() function, if we find + * devices which report that there is still more work to do, the + * the first such device we find is moved to the end of the device + * list. This ensures that extremely busy devices don't starve out + * less-busy ones. + * + */ +static int process_incoming(void *v) +{ + unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles; + struct list_head *new_tail = NULL; + int i; + + UIS_DAEMONIZE("dev_incoming"); + for (i = 0; i < 16; i++) { + old_cycles = get_cycles(); + wait_event_timeout(poll_dev_wake_q, + 0, POLLJIFFIES_NORMAL); + cur_cycles = get_cycles(); + if (wait_cycles == 0) { + wait_cycles = (cur_cycles - old_cycles); + } else { + if (wait_cycles < (cur_cycles - old_cycles)) + wait_cycles = (cur_cycles - old_cycles); + } + } + cycles_before_wait = wait_cycles; + idle_cycles = 0; + poll_dev_start = 0; + while (1) { + struct list_head *lelt, *tmp; + struct device_info *dev = NULL; + + /* poll each channel for input */ + down(&poll_dev_lock); + new_tail = NULL; + list_for_each_safe(lelt, tmp, &poll_dev_chan) { + int rc = 0; + + dev = list_entry(lelt, struct device_info, + list_polling_device_channels); + down(&dev->interrupt_callback_lock); + if (dev->interrupt) + rc = dev->interrupt(dev->interrupt_context); + else + continue; + up(&dev->interrupt_callback_lock); + if (rc) { + /* dev->interrupt returned, but there + * is still more work to do. + * Reschedule work to occur as soon as + * possible. */ + idle_cycles = 0; + if (!new_tail) { + dev->first_busy_cnt++; + if (! + (list_is_last + (lelt, + &poll_dev_chan))) { + new_tail = lelt; + dev->moved_to_tail_cnt++; + } else { + dev->last_on_list_cnt++; + } + } + } + if (kthread_should_stop()) + break; + } + if (new_tail) { + tot_moved_to_tail_cnt++; + list_move_tail(new_tail, &poll_dev_chan); + } + up(&poll_dev_lock); + cur_cycles = get_cycles(); + delta_cycles = cur_cycles - old_cycles; + old_cycles = cur_cycles; + + /* At this point, we have scanned thru all of the + * channels, and at least one of the following is true: + * - there is no input waiting on any of the channels + * - we have received a signal to stop this thread + */ + if (kthread_should_stop()) + break; + if (en_smart_wakeup == 0xFF) + break; + /* wait for POLLJIFFIES_NORMAL jiffies, or until + * someone wakes up poll_dev_wake_q, + * whichever comes first only do a wait when we have + * been idle for cycles_before_wait cycles. 
+ */ + if (idle_cycles > cycles_before_wait) { + poll_dev_start = 0; + tot_wait_cnt++; + wait_event_timeout(poll_dev_wake_q, + poll_dev_start, + POLLJIFFIES_NORMAL); + poll_dev_start = 1; + } else { + tot_schedule_cnt++; + schedule(); + idle_cycles = idle_cycles + delta_cycles; + } + } + complete_and_exit(&incoming_ti.has_stopped, 0); +} + +static BOOL +initialize_incoming_thread(void) +{ + if (incoming_started) + return TRUE; + if (!uisthread_start(&incoming_ti, + &process_incoming, NULL, "dev_incoming")) { + return FALSE; + } + incoming_started = TRUE; + return TRUE; +} + +/* Add a new device/channel to the list being processed by + * process_incoming(). + * - indicates the function to call periodically. + * - indicates the data to pass to the + * function. + */ +void +uislib_enable_channel_interrupts(u32 bus_no, u32 dev_no, + int (*interrupt)(void *), + void *interrupt_context) +{ + struct device_info *dev; + + dev = find_dev(bus_no, dev_no); + if (!dev) + return; + + down(&poll_dev_lock); + initialize_incoming_thread(); + dev->interrupt = interrupt; + dev->interrupt_context = interrupt_context; + dev->polling = TRUE; + list_add_tail(&dev->list_polling_device_channels, + &poll_dev_chan); + up(&poll_dev_lock); +} +EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts); + +/* Remove a device/channel from the list being processed by + * process_incoming(). + */ +void +uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no) +{ + struct device_info *dev; + + dev = find_dev(bus_no, dev_no); + if (!dev) + return; + down(&poll_dev_lock); + list_del(&dev->list_polling_device_channels); + dev->polling = FALSE; + dev->interrupt = NULL; + up(&poll_dev_lock); +} +EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts); + +static void +do_wakeup_polling_device_channels(struct work_struct *dummy) +{ + if (!poll_dev_start) { + poll_dev_start = 1; + wake_up(&poll_dev_wake_q); + } +} + +static DECLARE_WORK(work_wakeup_polling_device_channels, + do_wakeup_polling_device_channels); + +/* Call this function when you want to send a hint to process_incoming() that + * your device might have more requests. + */ +void +uislib_force_channel_interrupt(u32 bus_no, u32 dev_no) +{ + if (en_smart_wakeup == 0) + return; + if (poll_dev_start) + return; + /* The point of using schedule_work() instead of just doing + * the work inline is to force a slight delay before waking up + * the process_incoming() thread. + */ + tot_wakeup_cnt++; + schedule_work(&work_wakeup_polling_device_channels); +} +EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt); + +/*****************************************************/ +/* Module Init & Exit functions */ +/*****************************************************/ + +static int __init +uislib_mod_init(void) +{ + if (!unisys_spar_platform) + return -ENODEV; + + /* initialize global pointers to NULL */ + bus_list = NULL; + bus_list_count = 0; + max_bus_count = 0; + rwlock_init(&bus_list_lock); + virt_control_chan_func = NULL; + + /* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and + * then map this physical address to a virtual address. 
*/ + POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO); + + dir_debugfs = debugfs_create_dir(DIR_DEBUGFS_ENTRY, NULL); + if (dir_debugfs) { + info_debugfs_entry = debugfs_create_file( + INFO_DEBUGFS_ENTRY_FN, 0444, dir_debugfs, NULL, + &debugfs_info_fops); + + platformnumber_debugfs_read = debugfs_create_u32( + PLATFORMNUMBER_DEBUGFS_ENTRY_FN, 0444, dir_debugfs, + &platform_no); + + cycles_before_wait_debugfs_read = debugfs_create_u64( + CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN, 0666, dir_debugfs, + &cycles_before_wait); + + smart_wakeup_debugfs_entry = debugfs_create_bool( + SMART_WAKEUP_DEBUGFS_ENTRY_FN, 0666, dir_debugfs, + &en_smart_wakeup); + } + + POSTCODE_LINUX_3(DRIVER_EXIT_PC, 0, POSTCODE_SEVERITY_INFO); + return 0; +} + +static void __exit +uislib_mod_exit(void) +{ + if (debug_buf) { + vfree(debug_buf); + debug_buf = NULL; + } + + debugfs_remove(info_debugfs_entry); + debugfs_remove(smart_wakeup_debugfs_entry); + debugfs_remove(cycles_before_wait_debugfs_read); + debugfs_remove(platformnumber_debugfs_read); + debugfs_remove(dir_debugfs); +} + +module_init(uislib_mod_init); +module_exit(uislib_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Usha Srinivasan"); +MODULE_ALIAS("uislib"); + /* this is extracted during depmod and kept in modules.dep */ diff --git a/kernel/drivers/staging/unisys/uislib/uisqueue.c b/kernel/drivers/staging/unisys/uislib/uisqueue.c new file mode 100644 index 000000000..d46dd7428 --- /dev/null +++ b/kernel/drivers/staging/unisys/uislib/uisqueue.c @@ -0,0 +1,322 @@ +/* uisqueue.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* @ALL_INSPECTED */ +#include +#include + +#include "uisutils.h" + +/* this is shorter than using __FILE__ (full path name) in + * debug/info/error messages */ +#define CURRENT_FILE_PC UISLIB_PC_uisqueue_c +#define __MYFILE__ "uisqueue.c" + +#define CHECK_CACHE_ALIGN 0 + +/*****************************************************/ +/* Exported functions */ +/*****************************************************/ + +/* + * Routine Description: + * Tries to insert the prebuilt signal pointed to by pSignal into the nth + * Queue of the Channel pointed to by pChannel + * + * Parameters: + * pChannel: (IN) points to the IO Channel + * Queue: (IN) nth Queue of the IO Channel + * pSignal: (IN) pointer to the signal + * + * Assumptions: + * - pChannel, Queue and pSignal are valid. + * - If insertion fails due to a full queue, the caller will determine the + * retry policy (e.g. wait & try again, report an error, etc.). + * + * Return value: + * 1 if the insertion succeeds, 0 if the queue was full. 
+ */ +unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue, + void *sig) +{ + void __iomem *psignal; + unsigned int head, tail, nof; + + struct signal_queue_header __iomem *pqhdr = + (struct signal_queue_header __iomem *) + ((char __iomem *)ch + readq(&ch->ch_space_offset)) + + queue; + + /* capture current head and tail */ + head = readl(&pqhdr->head); + tail = readl(&pqhdr->tail); + + /* queue is full if (head + 1) % n equals tail */ + if (((head + 1) % readl(&pqhdr->max_slots)) == tail) { + nof = readq(&pqhdr->num_overflows) + 1; + writeq(nof, &pqhdr->num_overflows); + return 0; + } + + /* increment the head index */ + head = (head + 1) % readl(&pqhdr->max_slots); + + /* copy signal to the head location from the area pointed to + * by pSignal + */ + psignal = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) + + (head * readl(&pqhdr->signal_size)); + memcpy_toio(psignal, sig, readl(&pqhdr->signal_size)); + + mb(); /* channel synch */ + writel(head, &pqhdr->head); + + writeq(readq(&pqhdr->num_sent) + 1, &pqhdr->num_sent); + return 1; +} +EXPORT_SYMBOL_GPL(spar_signal_insert); + +/* + * Routine Description: + * Removes one signal from Channel pChannel's nth Queue at the + * time of the call and copies it into the memory pointed to by + * pSignal. + * + * Parameters: + * pChannel: (IN) points to the IO Channel + * Queue: (IN) nth Queue of the IO Channel + * pSignal: (IN) pointer to where the signals are to be copied + * + * Assumptions: + * - pChannel and Queue are valid. + * - pSignal points to a memory area large enough to hold queue's SignalSize + * + * Return value: + * 1 if the removal succeeds, 0 if the queue was empty. + */ +unsigned char +spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig) +{ + void __iomem *psource; + unsigned int head, tail; + struct signal_queue_header __iomem *pqhdr = + (struct signal_queue_header __iomem *)((char __iomem *)ch + + readq(&ch->ch_space_offset)) + queue; + + /* capture current head and tail */ + head = readl(&pqhdr->head); + tail = readl(&pqhdr->tail); + + /* queue is empty if the head index equals the tail index */ + if (head == tail) { + writeq(readq(&pqhdr->num_empty) + 1, &pqhdr->num_empty); + return 0; + } + + /* advance past the 'empty' front slot */ + tail = (tail + 1) % readl(&pqhdr->max_slots); + + /* copy signal from tail location to the area pointed to by pSignal */ + psource = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) + + (tail * readl(&pqhdr->signal_size)); + memcpy_fromio(sig, psource, readl(&pqhdr->signal_size)); + + mb(); /* channel synch */ + writel(tail, &pqhdr->tail); + + writeq(readq(&pqhdr->num_received) + 1, + &pqhdr->num_received); + return 1; +} +EXPORT_SYMBOL_GPL(spar_signal_remove); + +/* + * Routine Description: + * Removes all signals present in Channel pChannel's nth Queue at the + * time of the call and copies them into the memory pointed to by + * pSignal. Returns the # of signals copied as the value of the routine. + * + * Parameters: + * pChannel: (IN) points to the IO Channel + * Queue: (IN) nth Queue of the IO Channel + * pSignal: (IN) pointer to where the signals are to be copied + * + * Assumptions: + * - pChannel and Queue are valid. + * - pSignal points to a memory area large enough to hold Queue's MaxSignals + * # of signals, each of which is Queue's SignalSize. + * + * Return value: + * # of signals copied. 
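+ *
+ * Usage sketch (editorial illustration, not part of the original patch;
+ * max_signals and signal_size are placeholders for the queue's MaxSignals
+ * and SignalSize values mentioned above):
+ *
+ *	buf = kmalloc(max_signals * signal_size, GFP_KERNEL);
+ *	if (buf)
+ *		count = spar_signal_remove_all(ch, queue, buf);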
+ */ +unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue, + void *sig) +{ + void *psource; + unsigned int head, tail, count = 0; + struct signal_queue_header *pqhdr = + (struct signal_queue_header *)((char *)ch + + ch->ch_space_offset) + queue; + + /* capture current head and tail */ + head = pqhdr->head; + tail = pqhdr->tail; + + /* queue is empty if the head index equals the tail index */ + if (head == tail) + return 0; + + while (head != tail) { + /* advance past the 'empty' front slot */ + tail = (tail + 1) % pqhdr->max_slots; + + /* copy signal from tail location to the area pointed + * to by pSignal + */ + psource = + (char *)pqhdr + pqhdr->sig_base_offset + + (tail * pqhdr->signal_size); + memcpy((char *)sig + (pqhdr->signal_size * count), + psource, pqhdr->signal_size); + + mb(); /* channel synch */ + pqhdr->tail = tail; + + count++; + pqhdr->num_received++; + } + + return count; +} + +/* + * Routine Description: + * Determine whether a signal queue is empty. + * + * Parameters: + * pChannel: (IN) points to the IO Channel + * Queue: (IN) nth Queue of the IO Channel + * + * Return value: + * 1 if the signal queue is empty, 0 otherwise. + */ +unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch, + u32 queue) +{ + struct signal_queue_header __iomem *pqhdr = + (struct signal_queue_header __iomem *)((char __iomem *)ch + + readq(&ch->ch_space_offset)) + queue; + return readl(&pqhdr->head) == readl(&pqhdr->tail); +} +EXPORT_SYMBOL_GPL(spar_signalqueue_empty); + +unsigned long long +uisqueue_interlocked_or(unsigned long long __iomem *tgt, + unsigned long long set) +{ + unsigned long long i; + unsigned long long j; + + j = readq(tgt); + do { + i = j; + j = cmpxchg((__force unsigned long long *)tgt, i, i | set); + + } while (i != j); + + return j; +} +EXPORT_SYMBOL_GPL(uisqueue_interlocked_or); + +unsigned long long +uisqueue_interlocked_and(unsigned long long __iomem *tgt, + unsigned long long set) +{ + unsigned long long i; + unsigned long long j; + + j = readq(tgt); + do { + i = j; + j = cmpxchg((__force unsigned long long *)tgt, i, i & set); + + } while (i != j); + + return j; +} +EXPORT_SYMBOL_GPL(uisqueue_interlocked_and); + +static u8 +do_locked_client_insert(struct uisqueue_info *queueinfo, + unsigned int whichqueue, + void *signal, + spinlock_t *lock, + u8 *channel_id) +{ + unsigned long flags; + u8 rc = 0; + + spin_lock_irqsave(lock, flags); + if (!spar_channel_client_acquire_os(queueinfo->chan, channel_id)) + goto unlock; + if (spar_signal_insert(queueinfo->chan, whichqueue, signal)) { + queueinfo->packets_sent++; + rc = 1; + } + spar_channel_client_release_os(queueinfo->chan, channel_id); +unlock: + spin_unlock_irqrestore((spinlock_t *)lock, flags); + return rc; +} + +int +uisqueue_put_cmdrsp_with_lock_client(struct uisqueue_info *queueinfo, + struct uiscmdrsp *cmdrsp, + unsigned int whichqueue, + void *insertlock, + unsigned char issue_irq_if_empty, + u64 irq_handle, + char oktowait, u8 *channel_id) +{ + while (!do_locked_client_insert(queueinfo, whichqueue, cmdrsp, + (spinlock_t *)insertlock, + channel_id)) { + if (oktowait != OK_TO_WAIT) + return 0; /* failed to queue */ + + /* try again */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(10)); + } + return 1; +} +EXPORT_SYMBOL_GPL(uisqueue_put_cmdrsp_with_lock_client); + +/* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue + * returns NULL if queue is empty */ +int +uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo, + void *cmdrsp, 
unsigned int whichqueue) +{ + if (!spar_signal_remove(queueinfo->chan, whichqueue, cmdrsp)) + return 0; + + queueinfo->packets_received++; + + return 1; /* Success */ +} +EXPORT_SYMBOL_GPL(uisqueue_get_cmdrsp); diff --git a/kernel/drivers/staging/unisys/uislib/uisthread.c b/kernel/drivers/staging/unisys/uislib/uisthread.c new file mode 100644 index 000000000..d3c973b61 --- /dev/null +++ b/kernel/drivers/staging/unisys/uislib/uisthread.c @@ -0,0 +1,69 @@ +/* uisthread.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* @ALL_INSPECTED */ +#include +#include +#include +#include +#include "uisutils.h" +#include "uisthread.h" + +/* this is shorter than using __FILE__ (full path name) in + * debug/info/error messages + */ +#define CURRENT_FILE_PC UISLIB_PC_uisthread_c +#define __MYFILE__ "uisthread.c" + +/*****************************************************/ +/* Exported functions */ +/*****************************************************/ + +/* returns 0 for failure, 1 for success */ +int +uisthread_start(struct uisthread_info *thrinfo, + int (*threadfn)(void *), void *thrcontext, char *name) +{ + /* used to stop the thread */ + init_completion(&thrinfo->has_stopped); + thrinfo->task = kthread_run(threadfn, thrcontext, name); + if (IS_ERR(thrinfo->task)) { + thrinfo->id = 0; + return 0; /* failure */ + } + thrinfo->id = thrinfo->task->pid; + return 1; +} +EXPORT_SYMBOL_GPL(uisthread_start); + +void +uisthread_stop(struct uisthread_info *thrinfo) +{ + int stopped = 0; + + if (thrinfo->id == 0) + return; /* thread not running */ + + kthread_stop(thrinfo->task); + /* give up if the thread has NOT died in 1 minute */ + if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ)) + stopped = 1; + + if (stopped) + thrinfo->id = 0; +} +EXPORT_SYMBOL_GPL(uisthread_stop); diff --git a/kernel/drivers/staging/unisys/uislib/uisutils.c b/kernel/drivers/staging/unisys/uislib/uisutils.c new file mode 100644 index 000000000..26ab76526 --- /dev/null +++ b/kernel/drivers/staging/unisys/uislib/uisutils.c @@ -0,0 +1,137 @@ +/* uisutils.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "uisutils.h" +#include "version.h" +#include "vbushelper.h" +#include +#ifdef CONFIG_HIGHMEM +#include +#endif + +/* this is shorter than using __FILE__ (full path name) in + * debug/info/error messages + */ +#define CURRENT_FILE_PC UISLIB_PC_uisutils_c +#define __MYFILE__ "uisutils.c" + +/* exports */ +atomic_t uisutils_registered_services = ATOMIC_INIT(0); + /* num registrations via + * uisctrl_register_req_handler() or + * uisctrl_register_req_handler_ex() */ + +/*****************************************************/ +/* Utility functions */ +/*****************************************************/ + +int +uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining, + char *format, ...) +{ + va_list args; + int len; + + va_start(args, format); + len = vsnprintf(*buffer, *buffer_remaining, format, args); + va_end(args); + if (len >= *buffer_remaining) { + *buffer += *buffer_remaining; + *total += *buffer_remaining; + *buffer_remaining = 0; + return -1; + } + *buffer_remaining -= len; + *buffer += len; + *total += len; + return len; +} +EXPORT_SYMBOL_GPL(uisutil_add_proc_line_ex); + +int +uisctrl_register_req_handler(int type, void *fptr, + struct ultra_vbus_deviceinfo *chipset_driver_info) +{ + switch (type) { + case 2: + if (fptr) { + if (!virt_control_chan_func) + atomic_inc(&uisutils_registered_services); + virt_control_chan_func = fptr; + } else { + if (virt_control_chan_func) + atomic_dec(&uisutils_registered_services); + virt_control_chan_func = NULL; + } + break; + + default: + return 0; + } + if (chipset_driver_info) + bus_device_info_init(chipset_driver_info, "chipset", "uislib", + VERSION, NULL); + + return 1; +} +EXPORT_SYMBOL_GPL(uisctrl_register_req_handler); + +/* + * unsigned int uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx, + * void *skb_in, + * unsigned int firstfraglen, + * unsigned int frags_max, + * struct phys_info frags[]) + * + * calling_ctx - input - a string that is displayed to show + * who called * this func + * void *skb_in - skb whose frag info we're copying type is hidden so we + * don't need to include skbbuff in uisutils.h which is + * included in non-networking code. 
+ * unsigned int firstfraglen - input - length of first fragment in skb + * unsigned int frags_max - input - max len of frags array + * struct phys_info frags[] - output - frags array filled in on output + * return value indicates number of + * entries filled in frags + */ + +static LIST_HEAD(req_handler_info_list); /* list of struct req_handler_info */ +static DEFINE_SPINLOCK(req_handler_info_list_lock); + +struct req_handler_info * +req_handler_find(uuid_le switch_uuid) +{ + struct list_head *lelt, *tmp; + struct req_handler_info *entry = NULL; + + spin_lock(&req_handler_info_list_lock); + list_for_each_safe(lelt, tmp, &req_handler_info_list) { + entry = list_entry(lelt, struct req_handler_info, list_link); + if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) { + spin_unlock(&req_handler_info_list_lock); + return entry; + } + } + spin_unlock(&req_handler_info_list_lock); + return NULL; +} diff --git a/kernel/drivers/staging/unisys/virthba/Kconfig b/kernel/drivers/staging/unisys/virthba/Kconfig new file mode 100644 index 000000000..dfadfc491 --- /dev/null +++ b/kernel/drivers/staging/unisys/virthba/Kconfig @@ -0,0 +1,13 @@ +# +# Unisys virthba configuration +# + +config UNISYS_VIRTHBA + tristate "Unisys virthba driver" + depends on SCSI + select UNISYS_VISORCHIPSET + select UNISYS_UISLIB + select UNISYS_VIRTPCI + ---help--- + If you say Y here, you will enable the Unisys virthba driver. + diff --git a/kernel/drivers/staging/unisys/virthba/Makefile b/kernel/drivers/staging/unisys/virthba/Makefile new file mode 100644 index 000000000..a4e403739 --- /dev/null +++ b/kernel/drivers/staging/unisys/virthba/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for Unisys virthba +# + +obj-$(CONFIG_UNISYS_VIRTHBA) += virthba.o + +ccflags-y += -Idrivers/staging/unisys/include +ccflags-y += -Idrivers/staging/unisys/uislib +ccflags-y += -Idrivers/staging/unisys/visorchipset +ccflags-y += -Idrivers/staging/unisys/virtpci +ccflags-y += -Idrivers/staging/unisys/common-spar/include +ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels diff --git a/kernel/drivers/staging/unisys/virthba/virthba.c b/kernel/drivers/staging/unisys/virthba/virthba.c new file mode 100644 index 000000000..d9001cca0 --- /dev/null +++ b/kernel/drivers/staging/unisys/virthba/virthba.c @@ -0,0 +1,1572 @@ +/* virthba.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#define EXPORT_SYMTAB + +/* if you want to turn on some debugging of write device data or read + * device data, define these two undefs. You will probably want to + * customize the code which is here since it was written assuming + * reading and writing a specific data file df.64M.txt which is a + * 64Megabyte file created by Art Nilson using a scritp I wrote called + * cr_test_data.pl. 
The data file consists of 256 byte lines of text + * which start with an 8 digit sequence number, a colon, and then + * letters after that */ + +#include +#ifdef CONFIG_MODVERSIONS +#include +#endif + +#include "diagnostics/appos_subsystems.h" +#include "uisutils.h" +#include "uisqueue.h" +#include "uisthread.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virthba.h" +#include "virtpci.h" +#include "visorchipset.h" +#include "version.h" +#include "guestlinuxdebug.h" +/* this is shorter than using __FILE__ (full path name) in + * debug/info/error messages + */ +#define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c +#define __MYFILE__ "virthba.c" + +/* NOTE: L1_CACHE_BYTES >=128 */ +#define DEVICE_ATTRIBUTE struct device_attribute + + /* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters + * = 4800 bytes ~ 2^13 = 8192 bytes + */ +#define MAX_BUF 8192 + +/*****************************************************/ +/* Forward declarations */ +/*****************************************************/ +static int virthba_probe(struct virtpci_dev *dev, + const struct pci_device_id *id); +static void virthba_remove(struct virtpci_dev *dev); +static int virthba_abort_handler(struct scsi_cmnd *scsicmd); +static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd); +static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd); +static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd); +static const char *virthba_get_info(struct Scsi_Host *shp); +static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd, + void (*virthba_cmnd_done) + (struct scsi_cmnd *)); + +static const struct x86_cpu_id unisys_spar_ids[] = { + { X86_VENDOR_INTEL, 6, 62, X86_FEATURE_ANY }, + {} +}; + +/* Autoload */ +MODULE_DEVICE_TABLE(x86cpu, unisys_spar_ids); + +#ifdef DEF_SCSI_QCMD +static DEF_SCSI_QCMD(virthba_queue_command) +#else +#define virthba_queue_command virthba_queue_command_lck +#endif + +static int virthba_slave_alloc(struct scsi_device *scsidev); +static int virthba_slave_configure(struct scsi_device *scsidev); +static void virthba_slave_destroy(struct scsi_device *scsidev); +static int process_incoming_rsps(void *); +static int virthba_serverup(struct virtpci_dev *virtpcidev); +static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state); +static void do_disk_add_remove(struct work_struct *work); +static void virthba_serverdown_complete(struct work_struct *work); +static ssize_t info_debugfs_read(struct file *file, char __user *buf, + size_t len, loff_t *offset); +static ssize_t enable_ints_write(struct file *file, + const char __user *buffer, size_t count, + loff_t *ppos); + +/*****************************************************/ +/* Globals */ +/*****************************************************/ + +static int rsltq_wait_usecs = 4000; /* Default 4ms */ +static unsigned int max_buff_len; + +/* Module options */ +static char *virthba_options = "NONE"; + +static const struct pci_device_id virthba_id_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)}, + {0}, +}; + +/* export virthba_id_table */ +MODULE_DEVICE_TABLE(pci, virthba_id_table); + +static struct workqueue_struct *virthba_serverdown_workqueue; + +static struct virtpci_driver virthba_driver = { + .name = "uisvirthba", + .version = VERSION, + .vertag = NULL, + .id_table = virthba_id_table, + .probe = virthba_probe, + .remove = virthba_remove, 
+ .resume = virthba_serverup, + .suspend = virthba_serverdown +}; + +/* The Send and Recive Buffers of the IO Queue may both be full */ +#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2) +#define INTERRUPT_VECTOR_MASK 0x3F + +struct scsipending { + char cmdtype; /* Type of pointer that is being stored */ + void *sent; /* The Data being tracked */ + /* struct scsi_cmnd *type for virthba_queue_command */ + /* struct uiscmdrsp *type for management commands */ +}; + +#define VIRTHBA_ERROR_COUNT 30 +#define IOS_ERROR_THRESHOLD 1000 +struct virtdisk_info { + u32 valid; + u32 channel, id, lun; /* Disk Path */ + atomic_t ios_threshold; + atomic_t error_count; + struct virtdisk_info *next; +}; + +/* Each Scsi_Host has a host_data area that contains this struct. */ +struct virthba_info { + struct Scsi_Host *scsihost; + struct virtpci_dev *virtpcidev; + struct list_head dev_info_list; + struct chaninfo chinfo; + struct irq_info intr; /* use recvInterrupt info to receive + interrupts when IOs complete */ + int interrupt_vector; + struct scsipending pending[MAX_PENDING_REQUESTS]; /* Tracks the requests + that have been */ + /* forwarded to the IOVM and haven't returned yet */ + unsigned int nextinsert; /* Start search for next pending + free slot here */ + spinlock_t privlock; + bool serverdown; + bool serverchangingstate; + unsigned long long acquire_failed_cnt; + unsigned long long interrupts_rcvd; + unsigned long long interrupts_notme; + unsigned long long interrupts_disabled; + struct work_struct serverdown_completion; + u64 __iomem *flags_addr; + atomic_t interrupt_rcvd; + wait_queue_head_t rsp_queue; + struct virtdisk_info head; +}; + +/* Work Data for dar_work_queue */ +struct diskaddremove { + u8 add; /* 0-remove, 1-add */ + struct Scsi_Host *shost; /* Scsi Host for this virthba instance */ + u32 channel, id, lun; /* Disk Path */ + struct diskaddremove *next; +}; + +#define virtpci_dev_to_virthba_virthba_get_info(d) \ + container_of(d, struct virthba_info, virtpcidev) + +static DEVICE_ATTRIBUTE *virthba_shost_attrs[]; +static struct scsi_host_template virthba_driver_template = { + .name = "Unisys Virtual HBA", + .info = virthba_get_info, + .ioctl = virthba_ioctl, + .queuecommand = virthba_queue_command, + .eh_abort_handler = virthba_abort_handler, + .eh_device_reset_handler = virthba_device_reset_handler, + .eh_bus_reset_handler = virthba_bus_reset_handler, + .eh_host_reset_handler = virthba_host_reset_handler, + .shost_attrs = virthba_shost_attrs, + +#define VIRTHBA_MAX_CMNDS 128 + .can_queue = VIRTHBA_MAX_CMNDS, + .sg_tablesize = 64, /* largest number of address/length pairs */ + .this_id = -1, + .slave_alloc = virthba_slave_alloc, + .slave_configure = virthba_slave_configure, + .slave_destroy = virthba_slave_destroy, + .use_clustering = ENABLE_CLUSTERING, +}; + +struct virthba_devices_open { + struct virthba_info *virthbainfo; +}; + +static const struct file_operations debugfs_info_fops = { + .read = info_debugfs_read, +}; + +static const struct file_operations debugfs_enable_ints_fops = { + .write = enable_ints_write, +}; + +/*****************************************************/ +/* Structs */ +/*****************************************************/ + +#define VIRTHBASOPENMAX 1 +/* array of open devices maintained by open() and close(); */ +static struct virthba_devices_open virthbas_open[VIRTHBASOPENMAX]; +static struct dentry *virthba_debugfs_dir; + +/*****************************************************/ +/* Local Functions */ +/*****************************************************/ 
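+/* Editorial sketch (not part of the original patch): the scsipending
+ * table embedded in struct virthba_info above is a fixed-size slot
+ * allocator used to match responses coming back from the IOVM with the
+ * requests that produced them.  As implemented later in this file, a
+ * request is round-tripped roughly like this:
+ *
+ *	slot = add_scsipending_entry(vhbainfo, CMD_SCSI_TYPE, scsicmd);
+ *	cmdrsp->scsi.scsicmd = (void *)(uintptr_t)slot;
+ *	uisqueue_put_cmdrsp_with_lock_client(...);	send to the IOVM
+ *	...
+ *	scsicmd = del_scsipending_entry(vhbainfo, slot);  on completion
+ */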
+static int +add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new) +{ + unsigned long flags; + int insert_location; + + spin_lock_irqsave(&vhbainfo->privlock, flags); + insert_location = vhbainfo->nextinsert; + while (vhbainfo->pending[insert_location].sent) { + insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS; + if (insert_location == (int)vhbainfo->nextinsert) { + spin_unlock_irqrestore(&vhbainfo->privlock, flags); + return -1; + } + } + + vhbainfo->pending[insert_location].cmdtype = cmdtype; + vhbainfo->pending[insert_location].sent = new; + vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS; + spin_unlock_irqrestore(&vhbainfo->privlock, flags); + + return insert_location; +} + +static unsigned int +add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype, + void *new) +{ + int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new); + + while (insert_location == -1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(10)); + insert_location = add_scsipending_entry(vhbainfo, cmdtype, new); + } + + return (unsigned int)insert_location; +} + +static void * +del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del) +{ + unsigned long flags; + void *sent = NULL; + + if (del < MAX_PENDING_REQUESTS) { + spin_lock_irqsave(&vhbainfo->privlock, flags); + sent = vhbainfo->pending[del].sent; + + vhbainfo->pending[del].cmdtype = 0; + vhbainfo->pending[del].sent = NULL; + spin_unlock_irqrestore(&vhbainfo->privlock, flags); + } + + return sent; +} + +/* dar_work_queue (Disk Add/Remove) */ +static struct work_struct dar_work_queue; +static struct diskaddremove *dar_work_queue_head; +static spinlock_t dar_work_queue_lock; +static unsigned short dar_work_queue_sched; +#define QUEUE_DISKADDREMOVE(dar) { \ + spin_lock_irqsave(&dar_work_queue_lock, flags); \ + if (!dar_work_queue_head) { \ + dar_work_queue_head = dar; \ + dar->next = NULL; \ + } \ + else { \ + dar->next = dar_work_queue_head; \ + dar_work_queue_head = dar; \ + } \ + if (!dar_work_queue_sched) { \ + schedule_work(&dar_work_queue); \ + dar_work_queue_sched = 1; \ + } \ + spin_unlock_irqrestore(&dar_work_queue_lock, flags); \ +} + +static inline void +send_disk_add_remove(struct diskaddremove *dar) +{ + struct scsi_device *sdev; + int error; + + sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun); + if (sdev) { + if (!(dar->add)) + scsi_remove_device(sdev); + } else if (dar->add) { + error = + scsi_add_device(dar->shost, dar->channel, dar->id, + dar->lun); + } + kfree(dar); +} + +/*****************************************************/ +/* dar_work_queue Handler Thread */ +/*****************************************************/ +static void +do_disk_add_remove(struct work_struct *work) +{ + struct diskaddremove *dar; + struct diskaddremove *tmphead; + int i = 0; + unsigned long flags; + + spin_lock_irqsave(&dar_work_queue_lock, flags); + tmphead = dar_work_queue_head; + dar_work_queue_head = NULL; + dar_work_queue_sched = 0; + spin_unlock_irqrestore(&dar_work_queue_lock, flags); + while (tmphead) { + dar = tmphead; + tmphead = dar->next; + send_disk_add_remove(dar); + i++; + } +} + +/*****************************************************/ +/* Routine to add entry to dar_work_queue */ +/*****************************************************/ +static void +process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp) +{ + struct diskaddremove *dar; + unsigned long flags; + + dar = kzalloc(sizeof(*dar), 
GFP_ATOMIC); + if (dar) { + dar->add = cmdrsp->disknotify.add; + dar->shost = shost; + dar->channel = cmdrsp->disknotify.channel; + dar->id = cmdrsp->disknotify.id; + dar->lun = cmdrsp->disknotify.lun; + QUEUE_DISKADDREMOVE(dar); + } +} + +/*****************************************************/ +/* Probe Remove Functions */ +/*****************************************************/ +static irqreturn_t +virthba_isr(int irq, void *dev_id) +{ + struct virthba_info *virthbainfo = (struct virthba_info *)dev_id; + struct channel_header __iomem *channel_header; + struct signal_queue_header __iomem *pqhdr; + u64 mask; + unsigned long long rc1; + + if (!virthbainfo) + return IRQ_NONE; + virthbainfo->interrupts_rcvd++; + channel_header = virthbainfo->chinfo.queueinfo->chan; + if (((readq(&channel_header->features) + & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0) && + ((readq(&channel_header->features) & + ULTRA_IO_DRIVER_DISABLES_INTS) != + 0)) { + virthbainfo->interrupts_disabled++; + mask = ~ULTRA_CHANNEL_ENABLE_INTS; + rc1 = uisqueue_interlocked_and(virthbainfo->flags_addr, mask); + } + if (spar_signalqueue_empty(channel_header, IOCHAN_FROM_IOPART)) { + virthbainfo->interrupts_notme++; + return IRQ_NONE; + } + pqhdr = (struct signal_queue_header __iomem *) + ((char __iomem *)channel_header + + readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART; + writeq(readq(&pqhdr->num_irq_received) + 1, + &pqhdr->num_irq_received); + atomic_set(&virthbainfo->interrupt_rcvd, 1); + wake_up_interruptible(&virthbainfo->rsp_queue); + return IRQ_HANDLED; +} + +static int +virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id) +{ + int error; + struct Scsi_Host *scsihost; + struct virthba_info *virthbainfo; + int rsp; + int i; + irq_handler_t handler = virthba_isr; + struct channel_header __iomem *channel_header; + struct signal_queue_header __iomem *pqhdr; + u64 mask; + + POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + /* call scsi_host_alloc to register a scsi host adapter + * instance - this virthba that has just been created is an + * instance of a scsi host adapter. This scsi_host_alloc + * function allocates a new Scsi_Host struct & performs basic + * initialization. The host is not published to the scsi + * midlayer until scsi_add_host is called. + */ + + /* arg 2 passed in length of extra space we want allocated + * with scsi_host struct for our own use scsi_host_alloc + * assign host_no + */ + scsihost = scsi_host_alloc(&virthba_driver_template, + sizeof(struct virthba_info)); + if (!scsihost) + return -ENODEV; + + scsihost->this_id = UIS_MAGIC_VHBA; + /* linux treats max-channel differently than max-id & max-lun. + * In the latter cases, those two values result in 0 to max-1 + * (inclusive) being scanned. But in the case of channels, the + * scan is 0 to max (inclusive); so we will subtract one from + * the max-channel value. + */ + scsihost->max_channel = (unsigned)virtpcidev->scsi.max.max_channel; + scsihost->max_id = (unsigned)virtpcidev->scsi.max.max_id; + scsihost->max_lun = (unsigned)virtpcidev->scsi.max.max_lun; + scsihost->cmd_per_lun = (unsigned)virtpcidev->scsi.max.cmd_per_lun; + scsihost->max_sectors = + (unsigned short)(virtpcidev->scsi.max.max_io_size >> 9); + scsihost->sg_tablesize = + (unsigned short)(virtpcidev->scsi.max.max_io_size / PAGE_SIZE); + if (scsihost->sg_tablesize > MAX_PHYS_INFO) + scsihost->sg_tablesize = MAX_PHYS_INFO; + + /* this creates "host%d" in sysfs. 
If 2nd argument is NULL, + * then this generic /sys/devices/platform/host? device is + * created and /sys/scsi_host/host? -> + * /sys/devices/platform/host? If 2nd argument is not NULL, + * then this generic /sys/devices//host? is created and + * host? points to that device instead. + */ + error = scsi_add_host(scsihost, &virtpcidev->generic_dev); + if (error) { + POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + /* decr refcount on scsihost which was incremented by + * scsi_add_host so the scsi_host gets deleted + */ + scsi_host_put(scsihost); + return -ENODEV; + } + + virthbainfo = (struct virthba_info *)scsihost->hostdata; + memset(virthbainfo, 0, sizeof(struct virthba_info)); + for (i = 0; i < VIRTHBASOPENMAX; i++) { + if (!virthbas_open[i].virthbainfo) { + virthbas_open[i].virthbainfo = virthbainfo; + break; + } + } + virthbainfo->interrupt_vector = -1; + virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo; + virthbainfo->virtpcidev = virtpcidev; + spin_lock_init(&virthbainfo->chinfo.insertlock); + + init_waitqueue_head(&virthbainfo->rsp_queue); + spin_lock_init(&virthbainfo->privlock); + memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending)); + virthbainfo->serverdown = false; + virthbainfo->serverchangingstate = false; + + virthbainfo->intr = virtpcidev->intr; + /* save of host within virthba_info */ + virthbainfo->scsihost = scsihost; + + /* save of host within virtpci_dev */ + virtpcidev->scsi.scsihost = scsihost; + + /* Setup workqueue for serverdown messages */ + INIT_WORK(&virthbainfo->serverdown_completion, + virthba_serverdown_complete); + + writeq(readq(&virthbainfo->chinfo.queueinfo->chan->features) | + ULTRA_IO_CHANNEL_IS_POLLING, + &virthbainfo->chinfo.queueinfo->chan->features); + /* start thread that will receive scsicmnd responses */ + + channel_header = virthbainfo->chinfo.queueinfo->chan; + pqhdr = (struct signal_queue_header __iomem *) + ((char __iomem *)channel_header + + readq(&channel_header->ch_space_offset)) + IOCHAN_FROM_IOPART; + virthbainfo->flags_addr = &pqhdr->features; + + if (!uisthread_start(&virthbainfo->chinfo.threadinfo, + process_incoming_rsps, + virthbainfo, "vhba_incoming")) { + /* decr refcount on scsihost which was incremented by + * scsi_add_host so the scsi_host gets deleted + */ + POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + scsi_host_put(scsihost); + return -ENODEV; + } + virthbainfo->interrupt_vector = + virthbainfo->intr.recv_irq_handle & INTERRUPT_VECTOR_MASK; + rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED, + scsihost->hostt->name, virthbainfo); + if (rsp != 0) { + virthbainfo->interrupt_vector = -1; + POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + } else { + u64 __iomem *features_addr = + &virthbainfo->chinfo.queueinfo->chan->features; + mask = ~(ULTRA_IO_CHANNEL_IS_POLLING | + ULTRA_IO_DRIVER_DISABLES_INTS); + uisqueue_interlocked_and(features_addr, mask); + mask = ULTRA_IO_DRIVER_ENABLES_INTS; + uisqueue_interlocked_or(features_addr, mask); + rsltq_wait_usecs = 4000000; + } + + scsi_scan_host(scsihost); + + POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO); + return 0; +} + +static void +virthba_remove(struct virtpci_dev *virtpcidev) +{ + struct virthba_info *virthbainfo; + struct Scsi_Host *scsihost = + (struct Scsi_Host *)virtpcidev->scsi.scsihost; + + virthbainfo = (struct virthba_info *)scsihost->hostdata; + if (virthbainfo->interrupt_vector != -1) + free_irq(virthbainfo->interrupt_vector, virthbainfo); + + 
scsi_remove_host(scsihost); + + uisthread_stop(&virthbainfo->chinfo.threadinfo); + + /* decr refcount on scsihost which was incremented by + * scsi_add_host so the scsi_host gets deleted + */ + scsi_host_put(scsihost); +} + +static int +forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype, + struct Scsi_Host *scsihost, + struct uisscsi_dest *vdest) +{ + struct uiscmdrsp *cmdrsp; + struct virthba_info *virthbainfo = + (struct virthba_info *)scsihost->hostdata; + int notifyresult = 0xffff; + wait_queue_head_t notifyevent; + + if (virthbainfo->serverdown || virthbainfo->serverchangingstate) + return FAILED; + + cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC); + if (!cmdrsp) + return FAILED; /* reject */ + + init_waitqueue_head(¬ifyevent); + + /* issue VDISK_MGMT_CMD + * set type to command - as opposed to task mgmt + */ + cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE; + /* specify the event that has to be triggered when this cmd is + * complete + */ + cmdrsp->vdiskmgmt.notify = (void *)¬ifyevent; + cmdrsp->vdiskmgmt.notifyresult = (void *)¬ifyresult; + + /* save destination */ + cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype; + cmdrsp->vdiskmgmt.vdest.channel = vdest->channel; + cmdrsp->vdiskmgmt.vdest.id = vdest->id; + cmdrsp->vdiskmgmt.vdest.lun = vdest->lun; + cmdrsp->vdiskmgmt.scsicmd = + (void *)(uintptr_t) + add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE, + (void *)cmdrsp); + + uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo, + cmdrsp, IOCHAN_TO_IOPART, + &virthbainfo->chinfo.insertlock, + DONT_ISSUE_INTERRUPT, (u64)NULL, + OK_TO_WAIT, "vhba"); + wait_event(notifyevent, notifyresult != 0xffff); + kfree(cmdrsp); + return SUCCESS; +} + +/*****************************************************/ +/* Scsi Host support functions */ +/*****************************************************/ + +static int +forward_taskmgmt_command(enum task_mgmt_types tasktype, + struct scsi_device *scsidev) +{ + struct uiscmdrsp *cmdrsp; + struct virthba_info *virthbainfo = + (struct virthba_info *)scsidev->host->hostdata; + int notifyresult = 0xffff; + wait_queue_head_t notifyevent; + + if (virthbainfo->serverdown || virthbainfo->serverchangingstate) + return FAILED; + + cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC); + if (!cmdrsp) + return FAILED; /* reject */ + + init_waitqueue_head(¬ifyevent); + + /* issue TASK_MGMT_ABORT_TASK */ + /* set type to command - as opposed to task mgmt */ + cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE; + /* specify the event that has to be triggered when this */ + /* cmd is complete */ + cmdrsp->scsitaskmgmt.notify = (void *)¬ifyevent; + cmdrsp->scsitaskmgmt.notifyresult = (void *)¬ifyresult; + + /* save destination */ + cmdrsp->scsitaskmgmt.tasktype = tasktype; + cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel; + cmdrsp->scsitaskmgmt.vdest.id = scsidev->id; + cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun; + cmdrsp->scsitaskmgmt.scsicmd = + (void *)(uintptr_t) + add_scsipending_entry_with_wait(virthbainfo, + CMD_SCSITASKMGMT_TYPE, + (void *)cmdrsp); + + uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo, + cmdrsp, IOCHAN_TO_IOPART, + &virthbainfo->chinfo.insertlock, + DONT_ISSUE_INTERRUPT, (u64)NULL, + OK_TO_WAIT, "vhba"); + wait_event(notifyevent, notifyresult != 0xffff); + kfree(cmdrsp); + return SUCCESS; +} + +/* The abort handler returns SUCCESS if it has succeeded to make LLDD + * and all related hardware forget about the scmd. 
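+ * (Editorial note, not part of the original patch: in this driver the
+ * handler below updates the per-disk error_count bookkeeping and then
+ * forwards TASK_MGMT_ABORT_TASK to the IOVM via
+ * forward_taskmgmt_command().)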
+ */ +static int +virthba_abort_handler(struct scsi_cmnd *scsicmd) +{ + /* issue TASK_MGMT_ABORT_TASK */ + struct scsi_device *scsidev; + struct virtdisk_info *vdisk; + + scsidev = scsicmd->device; + for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head; + vdisk->next; vdisk = vdisk->next) { + if ((scsidev->channel == vdisk->channel) && + (scsidev->id == vdisk->id) && + (scsidev->lun == vdisk->lun)) { + if (atomic_read(&vdisk->error_count) < + VIRTHBA_ERROR_COUNT) { + atomic_inc(&vdisk->error_count); + POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC, + POSTCODE_SEVERITY_INFO); + } else + atomic_set(&vdisk->ios_threshold, + IOS_ERROR_THRESHOLD); + } + } + return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device); +} + +static int +virthba_bus_reset_handler(struct scsi_cmnd *scsicmd) +{ + /* issue TASK_MGMT_TARGET_RESET for each target on the bus */ + struct scsi_device *scsidev; + struct virtdisk_info *vdisk; + + scsidev = scsicmd->device; + for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head; + vdisk->next; vdisk = vdisk->next) { + if ((scsidev->channel == vdisk->channel) && + (scsidev->id == vdisk->id) && + (scsidev->lun == vdisk->lun)) { + if (atomic_read(&vdisk->error_count) < + VIRTHBA_ERROR_COUNT) { + atomic_inc(&vdisk->error_count); + POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC, + POSTCODE_SEVERITY_INFO); + } else + atomic_set(&vdisk->ios_threshold, + IOS_ERROR_THRESHOLD); + } + } + return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device); +} + +static int +virthba_device_reset_handler(struct scsi_cmnd *scsicmd) +{ + /* issue TASK_MGMT_LUN_RESET */ + struct scsi_device *scsidev; + struct virtdisk_info *vdisk; + + scsidev = scsicmd->device; + for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head; + vdisk->next; vdisk = vdisk->next) { + if ((scsidev->channel == vdisk->channel) && + (scsidev->id == vdisk->id) && + (scsidev->lun == vdisk->lun)) { + if (atomic_read(&vdisk->error_count) < + VIRTHBA_ERROR_COUNT) { + atomic_inc(&vdisk->error_count); + POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC, + POSTCODE_SEVERITY_INFO); + } else + atomic_set(&vdisk->ios_threshold, + IOS_ERROR_THRESHOLD); + } + } + return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device); +} + +static int +virthba_host_reset_handler(struct scsi_cmnd *scsicmd) +{ + /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */ + return SUCCESS; +} + +static char virthba_get_info_str[256]; + +static const char * +virthba_get_info(struct Scsi_Host *shp) +{ + /* Return version string */ + sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION); + return virthba_get_info_str; +} + +static int +virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +{ + return -EINVAL; +} + +/* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart + * is full. 
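+ * (Editorial summary of the code below, not part of the original patch:
+ * SCSI_MLQUEUE_DEVICE_BUSY is also returned while the server is down or
+ * changing state and when no pending slot is free; 1 is returned to
+ * reject a command that cannot be represented; 0 is returned once the
+ * command has been copied into the channel.)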
+ */ +static int +virthba_queue_command_lck(struct scsi_cmnd *scsicmd, + void (*virthba_cmnd_done)(struct scsi_cmnd *)) +{ + struct scsi_device *scsidev = scsicmd->device; + int insert_location; + unsigned char op; + unsigned char *cdb = scsicmd->cmnd; + struct Scsi_Host *scsihost = scsidev->host; + struct uiscmdrsp *cmdrsp; + unsigned int i; + struct virthba_info *virthbainfo = + (struct virthba_info *)scsihost->hostdata; + struct scatterlist *sg = NULL; + struct scatterlist *sgl = NULL; + int sg_failed = 0; + + if (virthbainfo->serverdown || virthbainfo->serverchangingstate) + return SCSI_MLQUEUE_DEVICE_BUSY; + cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC); + if (!cmdrsp) + return 1; /* reject the command */ + + /* now saving everything we need from scsi_cmd into cmdrsp + * before we queue cmdrsp set type to command - as opposed to + * task mgmt + */ + cmdrsp->cmdtype = CMD_SCSI_TYPE; + /* save the pending insertion location. Deletion from pending + * will return the scsicmd pointer for completion + */ + insert_location = + add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *)scsicmd); + if (insert_location != -1) { + cmdrsp->scsi.scsicmd = (void *)(uintptr_t)insert_location; + } else { + kfree(cmdrsp); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + /* save done function that we have call when cmd is complete */ + scsicmd->scsi_done = virthba_cmnd_done; + /* save destination */ + cmdrsp->scsi.vdest.channel = scsidev->channel; + cmdrsp->scsi.vdest.id = scsidev->id; + cmdrsp->scsi.vdest.lun = scsidev->lun; + /* save datadir */ + cmdrsp->scsi.data_dir = scsicmd->sc_data_direction; + memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE); + + cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd); + + /* keep track of the max buffer length so far. */ + if (cmdrsp->scsi.bufflen > max_buff_len) + max_buff_len = cmdrsp->scsi.bufflen; + + if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) { + del_scsipending_entry(virthbainfo, (uintptr_t)insert_location); + kfree(cmdrsp); + return 1; /* reject the command */ + } + + /* This is what we USED to do when we assumed we were running */ + /* uissd & virthba on the same Linux system. */ + /* cmdrsp->scsi.buffer = scsicmd->request_buffer; */ + /* The following code does NOT make that assumption. */ + /* convert buffer to phys information */ + if (scsi_sg_count(scsicmd) == 0) { + if (scsi_bufflen(scsicmd) > 0) { + BUG_ON(scsi_sg_count(scsicmd) == 0); + } + } else { + /* buffer is scatterlist - copy it out */ + sgl = scsi_sglist(scsicmd); + + for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) { + cmdrsp->scsi.gpi_list[i].address = sg_phys(sg); + cmdrsp->scsi.gpi_list[i].length = sg->length; + } + + if (sg_failed) { + /* BUG(); ***** For now, let it fail in uissd + * if it is a problem, as it might just + * work + */ + } + + cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd); + } + + op = cdb[0]; + i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo, + cmdrsp, IOCHAN_TO_IOPART, + &virthbainfo->chinfo. + insertlock, + DONT_ISSUE_INTERRUPT, + (u64)NULL, DONT_WAIT, "vhba"); + if (i == 0) { + /* queue must be full - and we said don't wait - return busy */ + kfree(cmdrsp); + del_scsipending_entry(virthbainfo, (uintptr_t)insert_location); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + /* we're done with cmdrsp space - data from it has been copied + * into channel - free it now. 
+ */ + kfree(cmdrsp); + return 0; /* non-zero implies host/device is busy */ +} + +static int +virthba_slave_alloc(struct scsi_device *scsidev) +{ + /* this called by the midlayer before scan for new devices - + * LLD can alloc any struct & do init if needed. + */ + struct virtdisk_info *vdisk; + struct virtdisk_info *tmpvdisk; + struct virthba_info *virthbainfo; + struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host; + + virthbainfo = (struct virthba_info *)scsihost->hostdata; + if (!virthbainfo) + return 0; /* even though we errored, treat as success */ + + for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) { + if (vdisk->next->valid && + (vdisk->next->channel == scsidev->channel) && + (vdisk->next->id == scsidev->id) && + (vdisk->next->lun == scsidev->lun)) + return 0; + } + tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC); + if (!tmpvdisk) + return 0; + + tmpvdisk->channel = scsidev->channel; + tmpvdisk->id = scsidev->id; + tmpvdisk->lun = scsidev->lun; + tmpvdisk->valid = 1; + vdisk->next = tmpvdisk; + return 0; /* success */ +} + +static int +virthba_slave_configure(struct scsi_device *scsidev) +{ + return 0; /* success */ +} + +static void +virthba_slave_destroy(struct scsi_device *scsidev) +{ + /* midlevel calls this after device has been quiesced and + * before it is to be deleted. + */ + struct virtdisk_info *vdisk, *delvdisk; + struct virthba_info *virthbainfo; + struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host; + + virthbainfo = (struct virthba_info *)scsihost->hostdata; + for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) { + if (vdisk->next->valid && + (vdisk->next->channel == scsidev->channel) && + (vdisk->next->id == scsidev->id) && + (vdisk->next->lun == scsidev->lun)) { + delvdisk = vdisk->next; + vdisk->next = vdisk->next->next; + kfree(delvdisk); + return; + } + } +} + +/*****************************************************/ +/* Scsi Cmnd support thread */ +/*****************************************************/ + +static void +do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) +{ + struct virtdisk_info *vdisk; + struct scsi_device *scsidev; + struct sense_data *sd; + + scsidev = scsicmd->device; + memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE); + sd = (struct sense_data *)scsicmd->sense_buffer; + + /* Do not log errors for disk-not-present inquiries */ + if ((cmdrsp->scsi.cmnd[0] == INQUIRY) && + (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) && + (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)) + return; + + /* Okay see what our error_count is here.... 
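+	 * (Editorial note, not part of the original patch: error_count
+	 * tracks recent errors on the matching virtual disk, capped at
+	 * VIRTHBA_ERROR_COUNT; once ios_threshold is armed it is counted
+	 * down by successful completions in do_scsi_nolinuxstat() until
+	 * error_count is cleared.)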
*/ + for (vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head; + vdisk->next; vdisk = vdisk->next) { + if ((scsidev->channel != vdisk->channel) || + (scsidev->id != vdisk->id) || + (scsidev->lun != vdisk->lun)) + continue; + + if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) { + atomic_inc(&vdisk->error_count); + atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD); + } + } +} + +static void +do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) +{ + struct scsi_device *scsidev; + unsigned char buf[36]; + struct scatterlist *sg; + unsigned int i; + char *thispage; + char *thispage_orig; + int bufind = 0; + struct virtdisk_info *vdisk; + + scsidev = scsicmd->device; + if ((cmdrsp->scsi.cmnd[0] == INQUIRY) && + (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) { + if (cmdrsp->scsi.no_disk_result == 0) + return; + + /* Linux scsi code is weird; it wants + * a device at Lun 0 to issue report + * luns, but we don't want a disk + * there so we'll present a processor + * there. */ + SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen, + scsidev->lun, + DEV_DISK_CAPABLE_NOT_PRESENT, + DEV_NOT_CAPABLE); + + if (scsi_sg_count(scsicmd) == 0) { + if (scsi_bufflen(scsicmd) > 0) { + BUG_ON(scsi_sg_count(scsicmd) == + 0); + } + memcpy(scsi_sglist(scsicmd), buf, + cmdrsp->scsi.bufflen); + return; + } + + sg = scsi_sglist(scsicmd); + for (i = 0; i < scsi_sg_count(scsicmd); i++) { + thispage_orig = kmap_atomic(sg_page(sg + i)); + thispage = (void *)((unsigned long)thispage_orig | + sg[i].offset); + memcpy(thispage, buf + bufind, sg[i].length); + kunmap_atomic(thispage_orig); + bufind += sg[i].length; + } + } else { + vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head; + for ( ; vdisk->next; vdisk = vdisk->next) { + if ((scsidev->channel != vdisk->channel) || + (scsidev->id != vdisk->id) || + (scsidev->lun != vdisk->lun)) + continue; + + if (atomic_read(&vdisk->ios_threshold) > 0) { + atomic_dec(&vdisk->ios_threshold); + if (atomic_read(&vdisk->ios_threshold) == 0) { + atomic_set(&vdisk->error_count, 0); + } + } + } + } +} + +static void +complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) +{ + /* take what we need out of cmdrsp and complete the scsicmd */ + scsicmd->result = cmdrsp->scsi.linuxstat; + if (cmdrsp->scsi.linuxstat) + do_scsi_linuxstat(cmdrsp, scsicmd); + else + do_scsi_nolinuxstat(cmdrsp, scsicmd); + + if (scsicmd->scsi_done) + scsicmd->scsi_done(scsicmd); +} + +static inline void +complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp) +{ + /* copy the result of the taskmgmt and */ + /* wake up the error handler that is waiting for this */ + *(int *)cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result; + wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify); +} + +static inline void +complete_taskmgmt_command(struct uiscmdrsp *cmdrsp) +{ + /* copy the result of the taskmgmt and */ + /* wake up the error handler that is waiting for this */ + *(int *)cmdrsp->scsitaskmgmt.notifyresult = + cmdrsp->scsitaskmgmt.result; + wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify); +} + +static void +drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc, + struct uiscmdrsp *cmdrsp) +{ + unsigned long flags; + int qrslt = 0; + struct scsi_cmnd *scsicmd; + struct Scsi_Host *shost = virthbainfo->scsihost; + + while (1) { + spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags); + if (!spar_channel_client_acquire_os(dc->queueinfo->chan, + "vhba")) { + 
spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, + flags); + virthbainfo->acquire_failed_cnt++; + break; + } + qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp, + IOCHAN_FROM_IOPART); + spar_channel_client_release_os(dc->queueinfo->chan, "vhba"); + spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags); + if (qrslt == 0) + break; + if (cmdrsp->cmdtype == CMD_SCSI_TYPE) { + /* scsicmd location is returned by the + * deletion + */ + scsicmd = del_scsipending_entry(virthbainfo, + (uintptr_t) + cmdrsp->scsi.scsicmd); + if (!scsicmd) + break; + /* complete the orig cmd */ + complete_scsi_command(cmdrsp, scsicmd); + } else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) { + if (!del_scsipending_entry(virthbainfo, + (uintptr_t)cmdrsp->scsitaskmgmt.scsicmd)) + break; + complete_taskmgmt_command(cmdrsp); + } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) { + /* The vHba pointer has no meaning in + * a Client/Guest Partition. Let's be + * safe and set it to NULL now. Do + * not use it here! */ + cmdrsp->disknotify.v_hba = NULL; + process_disk_notify(shost, cmdrsp); + } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) { + if (!del_scsipending_entry(virthbainfo, + (uintptr_t) + cmdrsp->vdiskmgmt.scsicmd)) + break; + complete_vdiskmgmt_command(cmdrsp); + } + /* cmdrsp is now available for reuse */ + } +} + +/* main function for the thread that waits for scsi commands to arrive + * in a specified queue + */ +static int +process_incoming_rsps(void *v) +{ + struct virthba_info *virthbainfo = v; + struct chaninfo *dc = &virthbainfo->chinfo; + struct uiscmdrsp *cmdrsp = NULL; + const int SZ = sizeof(struct uiscmdrsp); + u64 mask; + unsigned long long rc1; + + UIS_DAEMONIZE("vhba_incoming"); + /* alloc once and reuse */ + cmdrsp = kmalloc(SZ, GFP_ATOMIC); + if (!cmdrsp) { + complete_and_exit(&dc->threadinfo.has_stopped, 0); + return 0; + } + mask = ULTRA_CHANNEL_ENABLE_INTS; + while (1) { + if (kthread_should_stop()) + break; + wait_event_interruptible_timeout(virthbainfo->rsp_queue, + (atomic_read(&virthbainfo->interrupt_rcvd) == 1), + usecs_to_jiffies(rsltq_wait_usecs)); + atomic_set(&virthbainfo->interrupt_rcvd, 0); + /* drain queue */ + drain_queue(virthbainfo, dc, cmdrsp); + rc1 = uisqueue_interlocked_or(virthbainfo->flags_addr, mask); + } + + kfree(cmdrsp); + + complete_and_exit(&dc->threadinfo.has_stopped, 0); +} + +/*****************************************************/ +/* Debugfs filesystem functions */ +/*****************************************************/ + +static ssize_t info_debugfs_read(struct file *file, + char __user *buf, size_t len, loff_t *offset) +{ + ssize_t bytes_read = 0; + int str_pos = 0; + u64 phys_flags_addr; + int i; + struct virthba_info *virthbainfo; + char *vbuf; + + if (len > MAX_BUF) + len = MAX_BUF; + vbuf = kzalloc(len, GFP_KERNEL); + if (!vbuf) + return -ENOMEM; + + for (i = 0; i < VIRTHBASOPENMAX; i++) { + if (!virthbas_open[i].virthbainfo) + continue; + + virthbainfo = virthbas_open[i].virthbainfo; + + str_pos += scnprintf(vbuf + str_pos, + len - str_pos, "max_buff_len:%u\n", + max_buff_len); + + str_pos += scnprintf(vbuf + str_pos, len - str_pos, + "\nvirthba result queue poll wait:%d usecs.\n", + rsltq_wait_usecs); + str_pos += scnprintf(vbuf + str_pos, len - str_pos, + "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n", + virthbainfo->interrupts_rcvd, + virthbainfo->interrupts_disabled); + str_pos += scnprintf(vbuf + str_pos, + len - str_pos, "\ninterrupts_notme = %llu,\n", + virthbainfo->interrupts_notme); + phys_flags_addr = 
virt_to_phys((__force void *) + virthbainfo->flags_addr); + str_pos += scnprintf(vbuf + str_pos, len - str_pos, + "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n", + virthbainfo->flags_addr, phys_flags_addr, + (__le64)readq(virthbainfo->flags_addr)); + str_pos += scnprintf(vbuf + str_pos, + len - str_pos, "acquire_failed_cnt:%llu\n", + virthbainfo->acquire_failed_cnt); + str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n"); + } + + bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos); + kfree(vbuf); + return bytes_read; +} + +static ssize_t enable_ints_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char buf[4]; + int i, new_value; + struct virthba_info *virthbainfo; + + u64 __iomem *features_addr; + u64 mask; + + if (count >= ARRAY_SIZE(buf)) + return -EINVAL; + + buf[count] = '\0'; + if (copy_from_user(buf, buffer, count)) + return -EFAULT; + + i = kstrtoint(buf, 10, &new_value); + + if (i != 0) + return -EFAULT; + + /* set all counts to new_value usually 0 */ + for (i = 0; i < VIRTHBASOPENMAX; i++) { + if (virthbas_open[i].virthbainfo) { + virthbainfo = virthbas_open[i].virthbainfo; + features_addr = + &virthbainfo->chinfo.queueinfo->chan->features; + if (new_value == 1) { + mask = ~(ULTRA_IO_CHANNEL_IS_POLLING | + ULTRA_IO_DRIVER_DISABLES_INTS); + uisqueue_interlocked_and(features_addr, mask); + mask = ULTRA_IO_DRIVER_ENABLES_INTS; + uisqueue_interlocked_or(features_addr, mask); + rsltq_wait_usecs = 4000000; + } else { + mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS | + ULTRA_IO_DRIVER_DISABLES_INTS); + uisqueue_interlocked_and(features_addr, mask); + mask = ULTRA_IO_CHANNEL_IS_POLLING; + uisqueue_interlocked_or(features_addr, mask); + rsltq_wait_usecs = 4000; + } + } + } + return count; +} + +/* As per VirtpciFunc returns 1 for success and 0 for failure */ +static int +virthba_serverup(struct virtpci_dev *virtpcidev) +{ + struct virthba_info *virthbainfo = + (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi. 
+ scsihost)->hostdata; + + if (!virthbainfo->serverdown) + return 1; + + if (virthbainfo->serverchangingstate) + return 0; + + virthbainfo->serverchangingstate = true; + /* Must transition channel to ATTACHED state BEFORE we + * can start using the device again + */ + SPAR_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan, + dev_name(&virtpcidev->generic_dev), + CHANNELCLI_ATTACHED, NULL); + + /* Start Processing the IOVM Response Queue Again */ + if (!uisthread_start(&virthbainfo->chinfo.threadinfo, + process_incoming_rsps, + virthbainfo, "vhba_incoming")) { + return 0; + } + virthbainfo->serverdown = false; + virthbainfo->serverchangingstate = false; + + return 1; +} + +static void +virthba_serverdown_complete(struct work_struct *work) +{ + struct virthba_info *virthbainfo; + struct virtpci_dev *virtpcidev; + int i; + struct scsipending *pendingdel = NULL; + struct scsi_cmnd *scsicmd = NULL; + struct uiscmdrsp *cmdrsp; + unsigned long flags; + + virthbainfo = container_of(work, struct virthba_info, + serverdown_completion); + + /* Stop Using the IOVM Response Queue (queue should be drained + * by the end) + */ + uisthread_stop(&virthbainfo->chinfo.threadinfo); + + /* Fail Commands that weren't completed */ + spin_lock_irqsave(&virthbainfo->privlock, flags); + for (i = 0; i < MAX_PENDING_REQUESTS; i++) { + pendingdel = &virthbainfo->pending[i]; + switch (pendingdel->cmdtype) { + case CMD_SCSI_TYPE: + scsicmd = (struct scsi_cmnd *)pendingdel->sent; + scsicmd->result = DID_RESET << 16; + if (scsicmd->scsi_done) + scsicmd->scsi_done(scsicmd); + break; + case CMD_SCSITASKMGMT_TYPE: + cmdrsp = (struct uiscmdrsp *)pendingdel->sent; + wake_up_all((wait_queue_head_t *) + cmdrsp->scsitaskmgmt.notify); + *(int *)cmdrsp->scsitaskmgmt.notifyresult = + TASK_MGMT_FAILED; + break; + case CMD_VDISKMGMT_TYPE: + cmdrsp = (struct uiscmdrsp *)pendingdel->sent; + *(int *)cmdrsp->vdiskmgmt.notifyresult = + VDISK_MGMT_FAILED; + wake_up_all((wait_queue_head_t *) + cmdrsp->vdiskmgmt.notify); + break; + default: + break; + } + pendingdel->cmdtype = 0; + pendingdel->sent = NULL; + } + spin_unlock_irqrestore(&virthbainfo->privlock, flags); + + virtpcidev = virthbainfo->virtpcidev; + + virthbainfo->serverdown = true; + virthbainfo->serverchangingstate = false; + /* Return the ServerDown response to Command */ + visorchipset_device_pause_response(virtpcidev->bus_no, + virtpcidev->device_no, 0); +} + +/* As per VirtpciFunc returns 1 for success and 0 for failure */ +static int +virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state) +{ + int stat = 1; + + struct virthba_info *virthbainfo = + (struct virthba_info *)((struct Scsi_Host *)virtpcidev->scsi. 
+ scsihost)->hostdata; + + if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) { + virthbainfo->serverchangingstate = true; + queue_work(virthba_serverdown_workqueue, + &virthbainfo->serverdown_completion); + } else if (virthbainfo->serverchangingstate) { + stat = 0; + } + + return stat; +} + +/*****************************************************/ +/* Module Init & Exit functions */ +/*****************************************************/ + +static int __init +virthba_parse_line(char *str) +{ + return 1; +} + +static void __init +virthba_parse_options(char *line) +{ + char *next = line; + + POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + if (!line || !*line) + return; + while ((line = next)) { + next = strchr(line, ' '); + if (next) + *next++ = 0; + virthba_parse_line(line); + } + + POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO); +} + +static int __init +virthba_mod_init(void) +{ + int error; + int i; + + if (!unisys_spar_platform) + return -ENODEV; + + POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + virthba_parse_options(virthba_options); + + error = virtpci_register_driver(&virthba_driver); + if (error < 0) { + POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error, + POSTCODE_SEVERITY_ERR); + } else { + /* create the debugfs directories and entries */ + virthba_debugfs_dir = debugfs_create_dir("virthba", NULL); + debugfs_create_file("info", S_IRUSR, virthba_debugfs_dir, + NULL, &debugfs_info_fops); + debugfs_create_u32("rqwait_usecs", S_IRUSR | S_IWUSR, + virthba_debugfs_dir, &rsltq_wait_usecs); + debugfs_create_file("enable_ints", S_IWUSR, + virthba_debugfs_dir, NULL, + &debugfs_enable_ints_fops); + /* Initialize dar_work_queue */ + INIT_WORK(&dar_work_queue, do_disk_add_remove); + spin_lock_init(&dar_work_queue_lock); + + /* clear out array */ + for (i = 0; i < VIRTHBASOPENMAX; i++) + virthbas_open[i].virthbainfo = NULL; + /* Initialize the serverdown workqueue */ + virthba_serverdown_workqueue = + create_singlethread_workqueue("virthba_serverdown"); + if (!virthba_serverdown_workqueue) { + POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + error = -1; + } + } + + POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO); + return error; +} + +static ssize_t +virthba_acquire_lun(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uisscsi_dest vdest; + struct Scsi_Host *shost = class_to_shost(cdev); + int i; + + i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun); + if (i != 3) + return i; + + return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest); +} + +static ssize_t +virthba_release_lun(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uisscsi_dest vdest; + struct Scsi_Host *shost = class_to_shost(cdev); + int i; + + i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun); + if (i != 3) + return i; + + return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest); +} + +#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store) \ + struct device_attribute class_device_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun); +static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun); + +static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = { + &class_device_attr_acquire_lun, + &class_device_attr_release_lun, + NULL +}; + +static void __exit +virthba_mod_exit(void) +{ + 
virtpci_unregister_driver(&virthba_driver); + /* unregister is going to call virthba_remove */ + /* destroy serverdown completion workqueue */ + if (virthba_serverdown_workqueue) { + destroy_workqueue(virthba_serverdown_workqueue); + virthba_serverdown_workqueue = NULL; + } + + debugfs_remove_recursive(virthba_debugfs_dir); +} + +/* specify function to be run at module insertion time */ +module_init(virthba_mod_init); + +/* specify function to be run when module is removed */ +module_exit(virthba_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Usha Srinivasan"); +MODULE_ALIAS("uisvirthba"); + /* this is extracted during depmod and kept in modules.dep */ +/* module parameter */ +module_param(virthba_options, charp, S_IRUGO); diff --git a/kernel/drivers/staging/unisys/virthba/virthba.h b/kernel/drivers/staging/unisys/virthba/virthba.h new file mode 100644 index 000000000..59901668d --- /dev/null +++ b/kernel/drivers/staging/unisys/virthba/virthba.h @@ -0,0 +1,27 @@ +/* virthba.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * Unisys Virtual HBA driver header + */ + +#ifndef __VIRTHBA_H__ +#define __VIRTHBA_H__ + +#define VIRTHBA_VERSION "01.00" + +#endif /* __VIRTHBA_H__ */ diff --git a/kernel/drivers/staging/unisys/virtpci/Kconfig b/kernel/drivers/staging/unisys/virtpci/Kconfig new file mode 100644 index 000000000..6d19482ce --- /dev/null +++ b/kernel/drivers/staging/unisys/virtpci/Kconfig @@ -0,0 +1,10 @@ +# +# Unisys virtpci configuration +# + +config UNISYS_VIRTPCI + tristate "Unisys virtpci driver" + select UNISYS_UISLIB + ---help--- + If you say Y here, you will enable the Unisys virtpci driver. + diff --git a/kernel/drivers/staging/unisys/virtpci/Makefile b/kernel/drivers/staging/unisys/virtpci/Makefile new file mode 100644 index 000000000..a26c69621 --- /dev/null +++ b/kernel/drivers/staging/unisys/virtpci/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for Unisys virtpci +# + +obj-$(CONFIG_UNISYS_VIRTPCI) += virtpci.o + +ccflags-y += -Idrivers/staging/unisys/include +ccflags-y += -Idrivers/staging/unisys/uislib +ccflags-y += -Idrivers/staging/unisys/common-spar/include +ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels diff --git a/kernel/drivers/staging/unisys/virtpci/virtpci.c b/kernel/drivers/staging/unisys/virtpci/virtpci.c new file mode 100644 index 000000000..d5ad01783 --- /dev/null +++ b/kernel/drivers/staging/unisys/virtpci/virtpci.c @@ -0,0 +1,1394 @@ +/* virtpci.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#define EXPORT_SYMTAB + +#include +#ifdef CONFIG_MODVERSIONS +#include +#endif +#include "diagnostics/appos_subsystems.h" +#include "uisutils.h" +#include "vbuschannel.h" +#include "vbushelper.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "version.h" +#include "guestlinuxdebug.h" +#include "timskmod.h" + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +#define to_driver(obj) container_of(obj, struct driver_private, kobj) + +/* bus_id went away in 2.6.30 - the size was 20 bytes, so we'll define + * it ourselves, and a macro to make getting the field a bit simpler. + */ +#ifndef BUS_ID_SIZE +#define BUS_ID_SIZE 20 +#endif + +#define BUS_ID(x) dev_name(x) + +/* MAX_BUF = 4 busses x ( 32 devices/bus + 1 busline) x 80 characters + * = 10,560 bytes ~ 2^14 = 16,384 bytes + */ +#define MAX_BUF 16384 + +#include "virtpci.h" + +/* this is shorter than using __FILE__ (full path name) in + * debug/info/error messages + */ +#define CURRENT_FILE_PC VIRT_PCI_PC_virtpci_c +#define __MYFILE__ "virtpci.c" + +#define VIRTPCI_VERSION "01.00" + +/*****************************************************/ +/* Forward declarations */ +/*****************************************************/ + +static int delete_vbus_device(struct device *vbus, void *data); +static int match_busid(struct device *dev, void *data); +static void virtpci_bus_release(struct device *dev); +static void virtpci_device_release(struct device *dev); +static int virtpci_device_add(struct device *parentbus, int devtype, + struct add_virt_guestpart *addparams, + struct scsi_adap_info *scsi, + struct net_adap_info *net); +static int virtpci_device_del(struct device *parentbus, int devtype, + struct vhba_wwnn *wwnn, unsigned char macaddr[]); +static int virtpci_device_serverdown(struct device *parentbus, int devtype, + struct vhba_wwnn *wwnn, + unsigned char macaddr[]); +static int virtpci_device_serverup(struct device *parentbus, int devtype, + struct vhba_wwnn *wwnn, + unsigned char macaddr[]); +static ssize_t virtpci_driver_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf); +static ssize_t virtpci_driver_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t count); +static int virtpci_bus_match(struct device *dev, struct device_driver *drv); +static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env); +static int virtpci_device_probe(struct device *dev); +static int virtpci_device_remove(struct device *dev); + +static ssize_t info_debugfs_read(struct file *file, char __user *buf, + size_t len, loff_t *offset); + +static const struct file_operations debugfs_info_fops = { + .read = info_debugfs_read, +}; + +/*****************************************************/ +/* Globals */ +/*****************************************************/ + +/* methods in bus_type struct allow the bus code to serve as an + * intermediary between the device core and individual device core and + * individual drivers + */ +static struct bus_type virtpci_bus_type = { + .name = "uisvirtpci", + 
.match = virtpci_bus_match, + .uevent = virtpci_uevent, +}; + +static struct device virtpci_rootbus_device = { + .init_name = "vbusroot", /* root bus */ + .release = virtpci_bus_release +}; + +/* filled in with info about parent chipset driver when we register with it */ +static struct ultra_vbus_deviceinfo chipset_driver_info; + +static const struct sysfs_ops virtpci_driver_sysfs_ops = { + .show = virtpci_driver_attr_show, + .store = virtpci_driver_attr_store, +}; + +static struct kobj_type virtpci_driver_kobj_type = { + .sysfs_ops = &virtpci_driver_sysfs_ops, +}; + +static struct virtpci_dev *vpcidev_list_head; +static DEFINE_RWLOCK(vpcidev_list_lock); + +/* filled in with info about this driver, wrt it servicing client busses */ +static struct ultra_vbus_deviceinfo bus_driver_info; + +/*****************************************************/ +/* debugfs entries */ +/*****************************************************/ +/* dentry is used to create the debugfs entry directory + * for virtpci + */ +static struct dentry *virtpci_debugfs_dir; + +struct virtpci_busdev { + struct device virtpci_bus_device; +}; + +/*****************************************************/ +/* Local functions */ +/*****************************************************/ + +static inline +int WAIT_FOR_IO_CHANNEL(struct spar_io_channel_protocol __iomem *chanptr) +{ + int count = 120; + + while (count > 0) { + if (SPAR_CHANNEL_SERVER_READY(&chanptr->channel_header)) + return 1; + UIS_THREAD_WAIT_SEC(1); + count--; + } + return 0; +} + +/* Write the contents of to the ULTRA_VBUS_CHANNEL_PROTOCOL.ChpInfo. */ +static int write_vbus_chp_info(struct spar_vbus_channel_protocol *chan, + struct ultra_vbus_deviceinfo *info) +{ + int off; + + if (!chan) + return -1; + + off = sizeof(struct channel_header) + chan->hdr_info.chp_info_offset; + if (chan->hdr_info.chp_info_offset == 0) { + return -1; + } + memcpy(((u8 *)(chan)) + off, info, sizeof(*info)); + return 0; +} + +/* Write the contents of to the ULTRA_VBUS_CHANNEL_PROTOCOL.BusInfo. */ +static int write_vbus_bus_info(struct spar_vbus_channel_protocol *chan, + struct ultra_vbus_deviceinfo *info) +{ + int off; + + if (!chan) + return -1; + + off = sizeof(struct channel_header) + chan->hdr_info.bus_info_offset; + if (chan->hdr_info.bus_info_offset == 0) + return -1; + memcpy(((u8 *)(chan)) + off, info, sizeof(*info)); + return 0; +} + +/* Write the contents of to the + * ULTRA_VBUS_CHANNEL_PROTOCOL.DevInfo[]. 
+ */ +static int +write_vbus_dev_info(struct spar_vbus_channel_protocol *chan, + struct ultra_vbus_deviceinfo *info, int devix) +{ + int off; + + if (!chan) + return -1; + + off = + (sizeof(struct channel_header) + + chan->hdr_info.dev_info_offset) + + (chan->hdr_info.device_info_struct_bytes * devix); + if (chan->hdr_info.dev_info_offset == 0) + return -1; + + memcpy(((u8 *)(chan)) + off, info, sizeof(*info)); + return 0; +} + +/* adds a vbus + * returns 0 failure, 1 success, + */ +static int add_vbus(struct add_vbus_guestpart *addparams) +{ + int ret; + struct device *vbus; + + vbus = kzalloc(sizeof(*vbus), GFP_ATOMIC); + + POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + if (!vbus) + return 0; + + dev_set_name(vbus, "vbus%d", addparams->bus_no); + vbus->release = virtpci_bus_release; + vbus->parent = &virtpci_rootbus_device; /* root bus is parent */ + vbus->bus = &virtpci_bus_type; /* bus type */ + vbus->platform_data = (__force void *)addparams->chanptr; + + /* register a virt bus device - + * this bus shows up under /sys/devices with .name value + * "virtpci%d" any devices added to this bus then show up under + * /sys/devices/virtpci0 + */ + ret = device_register(vbus); + if (ret) { + POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + return 0; + } + write_vbus_chp_info(vbus->platform_data /* chanptr */, + &chipset_driver_info); + write_vbus_bus_info(vbus->platform_data /* chanptr */, + &bus_driver_info); + POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO); + return 1; +} + +/* for CHANSOCK wwwnn/max are AUTO-GENERATED; for normal channels, + * wwnn/max are in the channel header. + */ +#define GET_SCSIADAPINFO_FROM_CHANPTR(chanptr) { \ + memcpy_fromio(&scsi.wwnn, \ + &((struct spar_io_channel_protocol __iomem *) \ + chanptr)->vhba.wwnn, \ + sizeof(struct vhba_wwnn)); \ + memcpy_fromio(&scsi.max, \ + &((struct spar_io_channel_protocol __iomem *) \ + chanptr)->vhba.max, \ + sizeof(struct vhba_config_max)); \ + } + +/* adds a vhba + * returns 0 failure, 1 success, + */ +static int add_vhba(struct add_virt_guestpart *addparams) +{ + int i; + struct scsi_adap_info scsi; + struct device *vbus; + unsigned char busid[BUS_ID_SIZE]; + + POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + if (!WAIT_FOR_IO_CHANNEL + ((struct spar_io_channel_protocol __iomem *)addparams->chanptr)) { + POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + return 0; + } + + GET_SCSIADAPINFO_FROM_CHANPTR(addparams->chanptr); + + /* find bus device with the busid that matches match_busid */ + sprintf(busid, "vbus%d", addparams->bus_no); + vbus = bus_find_device(&virtpci_bus_type, NULL, + (void *)busid, match_busid); + if (!vbus) + return 0; + + i = virtpci_device_add(vbus, VIRTHBA_TYPE, addparams, &scsi, NULL); + if (i) { + POSTCODE_LINUX_3(VPCI_CREATE_EXIT_PC, i, + POSTCODE_SEVERITY_INFO); + } + return i; +} + +/* for CHANSOCK macaddr is AUTO-GENERATED; for normal channels, + * macaddr is in the channel header. 
+ */ +#define GET_NETADAPINFO_FROM_CHANPTR(chanptr) { \ + memcpy_fromio(net.mac_addr, \ + ((struct spar_io_channel_protocol __iomem *) \ + chanptr)->vnic.macaddr, \ + MAX_MACADDR_LEN); \ + net.num_rcv_bufs = \ + readl(&((struct spar_io_channel_protocol __iomem *)\ + chanptr)->vnic.num_rcv_bufs); \ + net.mtu = readl(&((struct spar_io_channel_protocol __iomem *) \ + chanptr)->vnic.mtu); \ + memcpy_fromio(&net.zone_uuid, \ + &((struct spar_io_channel_protocol __iomem *)\ + chanptr)->vnic.zone_uuid, \ + sizeof(uuid_le)); \ +} + +/* adds a vnic + * returns 0 failure, 1 success, + */ +static int +add_vnic(struct add_virt_guestpart *addparams) +{ + int i; + struct net_adap_info net; + struct device *vbus; + unsigned char busid[BUS_ID_SIZE]; + + POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + if (!WAIT_FOR_IO_CHANNEL + ((struct spar_io_channel_protocol __iomem *)addparams->chanptr)) { + POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + return 0; + } + + GET_NETADAPINFO_FROM_CHANPTR(addparams->chanptr); + + /* find bus device with the busid that matches match_busid */ + sprintf(busid, "vbus%d", addparams->bus_no); + vbus = bus_find_device(&virtpci_bus_type, NULL, + (void *)busid, match_busid); + if (!vbus) + return 0; + + i = virtpci_device_add(vbus, VIRTNIC_TYPE, addparams, NULL, &net); + if (i) { + POSTCODE_LINUX_3(VPCI_CREATE_EXIT_PC, i, + POSTCODE_SEVERITY_INFO); + return 1; + } + return 0; +} + +/* delete vbus + * returns 0 failure, 1 success, + */ +static int +delete_vbus(struct del_vbus_guestpart *delparams) +{ + struct device *vbus; + unsigned char busid[BUS_ID_SIZE]; + + /* find bus device with the busid that matches match_busid */ + sprintf(busid, "vbus%d", delparams->bus_no); + vbus = bus_find_device(&virtpci_bus_type, NULL, + (void *)busid, match_busid); + if (!vbus) + return 0; + + /* ensure that bus has no devices? 
-- TBD */ + return 1; +} + +static int +delete_vbus_device(struct device *vbus, void *data) +{ + struct device *dev = &virtpci_rootbus_device; + + if ((data) && match_busid(vbus, (void *)BUS_ID(dev))) { + /* skip it - don't delete root bus */ + return 0; /* pretend no error */ + } + device_unregister(vbus); + kfree(vbus); + return 0; /* no error */ +} + +/* pause vhba +* returns 0 failure, 1 success, +*/ +static int pause_vhba(struct pause_virt_guestpart *pauseparams) +{ + int i; + struct scsi_adap_info scsi; + + GET_SCSIADAPINFO_FROM_CHANPTR(pauseparams->chanptr); + + i = virtpci_device_serverdown(NULL /*no parent bus */, VIRTHBA_TYPE, + &scsi.wwnn, NULL); + return i; +} + +/* pause vnic + * returns 0 failure, 1 success, + */ +static int pause_vnic(struct pause_virt_guestpart *pauseparams) +{ + int i; + struct net_adap_info net; + + GET_NETADAPINFO_FROM_CHANPTR(pauseparams->chanptr); + + i = virtpci_device_serverdown(NULL /*no parent bus */, VIRTNIC_TYPE, + NULL, net.mac_addr); + return i; +} + +/* resume vhba + * returns 0 failure, 1 success, + */ +static int resume_vhba(struct resume_virt_guestpart *resumeparams) +{ + int i; + struct scsi_adap_info scsi; + + GET_SCSIADAPINFO_FROM_CHANPTR(resumeparams->chanptr); + + i = virtpci_device_serverup(NULL /*no parent bus */, VIRTHBA_TYPE, + &scsi.wwnn, NULL); + return i; +} + +/* resume vnic +* returns 0 failure, 1 success, +*/ +static int +resume_vnic(struct resume_virt_guestpart *resumeparams) +{ + int i; + struct net_adap_info net; + + GET_NETADAPINFO_FROM_CHANPTR(resumeparams->chanptr); + + i = virtpci_device_serverup(NULL /*no parent bus */, VIRTNIC_TYPE, + NULL, net.mac_addr); + return i; +} + +/* delete vhba +* returns 0 failure, 1 success, +*/ +static int delete_vhba(struct del_virt_guestpart *delparams) +{ + int i; + struct scsi_adap_info scsi; + + GET_SCSIADAPINFO_FROM_CHANPTR(delparams->chanptr); + + i = virtpci_device_del(NULL /*no parent bus */, VIRTHBA_TYPE, + &scsi.wwnn, NULL); + if (i) { + return 1; + } + return 0; +} + +/* deletes a vnic + * returns 0 failure, 1 success, + */ +static int delete_vnic(struct del_virt_guestpart *delparams) +{ + int i; + struct net_adap_info net; + + GET_NETADAPINFO_FROM_CHANPTR(delparams->chanptr); + + i = virtpci_device_del(NULL /*no parent bus */, VIRTNIC_TYPE, NULL, + net.mac_addr); + return i; +} + +#define DELETE_ONE_VPCIDEV(vpcidev) { \ + device_unregister(&vpcidev->generic_dev); \ + kfree(vpcidev); \ +} + +/* deletes all vhbas and vnics + * returns 0 failure, 1 success, + */ +static void delete_all(void) +{ + int count = 0; + unsigned long flags; + struct virtpci_dev *tmpvpcidev, *nextvpcidev; + + /* delete the entire vhba/vnic list in one shot */ + write_lock_irqsave(&vpcidev_list_lock, flags); + tmpvpcidev = vpcidev_list_head; + vpcidev_list_head = NULL; + write_unlock_irqrestore(&vpcidev_list_lock, flags); + + /* delete one vhba/vnic at a time */ + while (tmpvpcidev) { + nextvpcidev = tmpvpcidev->next; + /* delete the vhba/vnic at tmpvpcidev */ + DELETE_ONE_VPCIDEV(tmpvpcidev); + tmpvpcidev = nextvpcidev; + count++; + } + + /* now delete each vbus */ + bus_for_each_dev(&virtpci_bus_type, NULL, (void *)1, + delete_vbus_device); +} + +/* deletes all vnics or vhbas + * returns 0 failure, 1 success, + */ +static int delete_all_virt(enum virtpci_dev_type devtype, + struct del_vbus_guestpart *delparams) +{ + int i; + unsigned char busid[BUS_ID_SIZE]; + struct device *vbus; + + /* find bus device with the busid that matches match_busid */ + sprintf(busid, "vbus%d", delparams->bus_no); + vbus = 
bus_find_device(&virtpci_bus_type, NULL, + (void *)busid, match_busid); + if (!vbus) + return 0; + + if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) + return 0; + + /* delete all vhbas/vnics */ + i = virtpci_device_del(vbus, devtype, NULL, NULL); + return 1; +} + +static int virtpci_ctrlchan_func(struct guest_msgs *msg) +{ + switch (msg->msgtype) { + case GUEST_ADD_VBUS: + return add_vbus(&msg->add_vbus); + case GUEST_ADD_VHBA: + return add_vhba(&msg->add_vhba); + case GUEST_ADD_VNIC: + return add_vnic(&msg->add_vnic); + case GUEST_DEL_VBUS: + return delete_vbus(&msg->del_vbus); + case GUEST_DEL_VHBA: + return delete_vhba(&msg->del_vhba); + case GUEST_DEL_VNIC: + return delete_vnic(&msg->del_vhba); + case GUEST_DEL_ALL_VHBAS: + return delete_all_virt(VIRTHBA_TYPE, &msg->del_all_vhbas); + case GUEST_DEL_ALL_VNICS: + return delete_all_virt(VIRTNIC_TYPE, &msg->del_all_vnics); + case GUEST_DEL_ALL_VBUSES: + delete_all(); + return 1; + case GUEST_PAUSE_VHBA: + return pause_vhba(&msg->pause_vhba); + case GUEST_PAUSE_VNIC: + return pause_vnic(&msg->pause_vnic); + case GUEST_RESUME_VHBA: + return resume_vhba(&msg->resume_vhba); + case GUEST_RESUME_VNIC: + return resume_vnic(&msg->resume_vnic); + default: + return 0; + } +} + +/* same as driver_helper in bus.c linux */ +static int match_busid(struct device *dev, void *data) +{ + const char *name = data; + + if (strcmp(name, BUS_ID(dev)) == 0) + return 1; + return 0; +} + +/*****************************************************/ +/* Bus functions */ +/*****************************************************/ + +static const struct pci_device_id * +virtpci_match_device(const struct pci_device_id *ids, + const struct virtpci_dev *dev) +{ + while (ids->vendor || ids->subvendor || ids->class_mask) { + if ((ids->vendor == dev->vendor) && + (ids->device == dev->device)) + return ids; + + ids++; + } + return NULL; +} + +/* NOTE: !!!!!! This function is called when a new device is added +* for this bus. Or, it is called for existing devices when a new +* driver is added for this bus. It returns nonzero if a given device +* can be handled by the given driver. +*/ +static int virtpci_bus_match(struct device *dev, struct device_driver *drv) +{ + struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev); + struct virtpci_driver *virtpcidrv = driver_to_virtpci_driver(drv); + int match = 0; + + /* check ids list for a match */ + if (virtpci_match_device(virtpcidrv->id_table, virtpcidev)) + match = 1; + + return match; /* 0 - no match; 1 - yes it matches */ +} + +static int virtpci_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + /* add variables to the environment prior to the generation of + * hotplug events to user space + */ + if (add_uevent_var(env, "VIRTPCI_VERSION=%s", VIRTPCI_VERSION)) + return -ENOMEM; + return 0; +} + +/* For a child device just created on a client bus, fill in + * information about the driver that is controlling this device into + * the appropriate slot within the vbus channel of the bus + * instance. 
+ */ +static void fix_vbus_dev_info(struct device *dev, int dev_no, int dev_type, + struct virtpci_driver *virtpcidrv) +{ + struct device *vbus; + void *chan; + struct ultra_vbus_deviceinfo dev_info; + const char *stype; + + if (!dev) + return; + if (!virtpcidrv) + return; + + vbus = dev->parent; + if (!vbus) + return; + + chan = vbus->platform_data; + if (!chan) + return; + + switch (dev_type) { + case PCI_DEVICE_ID_VIRTHBA: + stype = "vHBA"; + break; + case PCI_DEVICE_ID_VIRTNIC: + stype = "vNIC"; + break; + default: + stype = "unknown"; + break; + } + bus_device_info_init(&dev_info, stype, + virtpcidrv->name, + virtpcidrv->version, + virtpcidrv->vertag); + write_vbus_dev_info(chan, &dev_info, dev_no); + + /* Re-write bus+chipset info, because it is possible that this + * was previously written by our good counterpart, visorbus. + */ + write_vbus_chp_info(chan, &chipset_driver_info); + write_vbus_bus_info(chan, &bus_driver_info); +} + +/* This function is called to query the existence of a specific device +* and whether this driver can work with it. It should return -ENODEV +* in case of failure. +*/ +static int virtpci_device_probe(struct device *dev) +{ + struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev); + struct virtpci_driver *virtpcidrv = + driver_to_virtpci_driver(dev->driver); + const struct pci_device_id *id; + int error = 0; + + POSTCODE_LINUX_2(VPCI_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + /* static match and static probe vs dynamic match & dynamic + * probe - do we care?. + */ + if (!virtpcidrv->id_table) + return -ENODEV; + + id = virtpci_match_device(virtpcidrv->id_table, virtpcidev); + if (!id) + return -ENODEV; + + /* increment reference count */ + get_device(dev); + + /* if virtpcidev is not already claimed & probe function is + * valid, probe it + */ + if (!virtpcidev->mydriver && virtpcidrv->probe) { + /* call the probe function - virthba or virtnic probe + * is what it should be + */ + error = virtpcidrv->probe(virtpcidev, id); + if (!error) { + fix_vbus_dev_info(dev, virtpcidev->device_no, + virtpcidev->device, virtpcidrv); + virtpcidev->mydriver = virtpcidrv; + POSTCODE_LINUX_2(VPCI_PROBE_EXIT_PC, + POSTCODE_SEVERITY_INFO); + } else { + put_device(dev); + } + } + POSTCODE_LINUX_2(VPCI_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + return error; /* -ENODEV for probe failure */ +} + +static int virtpci_device_remove(struct device *dev_) +{ + /* dev_ passed in is the HBA device which we called + * generic_dev in our virtpcidev struct + */ + struct virtpci_dev *virtpcidev = device_to_virtpci_dev(dev_); + struct virtpci_driver *virtpcidrv = virtpcidev->mydriver; + + if (virtpcidrv) { + /* TEMP: assuming we have only one such driver for now */ + if (virtpcidrv->remove) + virtpcidrv->remove(virtpcidev); + virtpcidev->mydriver = NULL; + } + + put_device(dev_); + return 0; +} + +/*****************************************************/ +/* Bus functions */ +/*****************************************************/ + +static void virtpci_bus_release(struct device *dev) +{ +} + +/*****************************************************/ +/* Adapter functions */ +/*****************************************************/ + +/* scsi is expected to be NULL for VNIC add + * net is expected to be NULL for VHBA add + */ +static int virtpci_device_add(struct device *parentbus, int devtype, + struct add_virt_guestpart *addparams, + struct scsi_adap_info *scsi, + struct net_adap_info *net) +{ + struct virtpci_dev *virtpcidev = NULL; + struct virtpci_dev *tmpvpcidev = NULL, *prev; + 
unsigned long flags; + int ret; + struct spar_io_channel_protocol __iomem *io_chan = NULL; + struct device *dev; + + POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + + if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) { + POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, devtype, + POSTCODE_SEVERITY_ERR); + return 0; + } + + /* add a Virtual Device */ + virtpcidev = kzalloc(sizeof(*virtpcidev), GFP_ATOMIC); + if (!virtpcidev) { + POSTCODE_LINUX_2(MALLOC_FAILURE_PC, POSTCODE_SEVERITY_ERR); + return 0; + } + + /* initialize stuff unique to virtpci_dev struct */ + virtpcidev->devtype = devtype; + if (devtype == VIRTHBA_TYPE) { + virtpcidev->device = PCI_DEVICE_ID_VIRTHBA; + virtpcidev->scsi = *scsi; + } else { + virtpcidev->device = PCI_DEVICE_ID_VIRTNIC; + virtpcidev->net = *net; + } + virtpcidev->vendor = PCI_VENDOR_ID_UNISYS; + virtpcidev->bus_no = addparams->bus_no; + virtpcidev->device_no = addparams->device_no; + + virtpcidev->queueinfo.chan = addparams->chanptr; + virtpcidev->queueinfo.send_int_if_needed = NULL; + + /* Set up safe queue... */ + io_chan = (struct spar_io_channel_protocol __iomem *) + virtpcidev->queueinfo.chan; + + virtpcidev->intr = addparams->intr; + + /* initialize stuff in the device portion of the struct */ + virtpcidev->generic_dev.bus = &virtpci_bus_type; + virtpcidev->generic_dev.parent = parentbus; + virtpcidev->generic_dev.release = virtpci_device_release; + + dev_set_name(&virtpcidev->generic_dev, "%x:%x", + addparams->bus_no, addparams->device_no); + + /* add the vhba/vnic to virtpci device list - but check for + * duplicate wwnn/macaddr first + */ + write_lock_irqsave(&vpcidev_list_lock, flags); + for (tmpvpcidev = vpcidev_list_head; tmpvpcidev; + tmpvpcidev = tmpvpcidev->next) { + if (devtype == VIRTHBA_TYPE) { + if ((tmpvpcidev->scsi.wwnn.wwnn1 == scsi->wwnn.wwnn1) && + (tmpvpcidev->scsi.wwnn.wwnn2 == scsi->wwnn.wwnn2)) { + /* duplicate - already have vpcidev + with this wwnn */ + break; + } + } else + if (memcmp + (tmpvpcidev->net.mac_addr, net->mac_addr, + MAX_MACADDR_LEN) == 0) { + /* duplicate - already have vnic with this wwnn */ + break; + } + } + if (tmpvpcidev) { + /* found a vhba/vnic already in the list with same + * wwnn or macaddr - reject add + */ + write_unlock_irqrestore(&vpcidev_list_lock, flags); + kfree(virtpcidev); + POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + return 0; + } + + /* add it at the head */ + if (!vpcidev_list_head) { + vpcidev_list_head = virtpcidev; + } else { + /* insert virtpcidev at the head of our linked list of + * vpcidevs + */ + virtpcidev->next = vpcidev_list_head; + vpcidev_list_head = virtpcidev; + } + + write_unlock_irqrestore(&vpcidev_list_lock, flags); + + /* Must transition channel to ATTACHED state BEFORE + * registering the device, because polling of the channel + * queues can begin at any time after device_register(). + */ + dev = &virtpcidev->generic_dev; + SPAR_CHANNEL_CLIENT_TRANSITION(addparams->chanptr, + BUS_ID(dev), + CHANNELCLI_ATTACHED, NULL); + + /* don't register until device has been added to + * list. Otherwise, a device_unregister from this function can + * cause a "scheduling while atomic". + */ + ret = device_register(&virtpcidev->generic_dev); + /* NOTE: THIS IS CALLING HOTPLUG virtpci_hotplug!!! + * This call to device_register results in virtpci_bus_match + * being called !!!!! 
And, if match returns success, then + * virtpcidev->generic_dev.driver is setup to core_driver, + * i.e., virtpci and the probe function + * virtpcidev->generic_dev.driver->probe is called which + * results in virtpci_device_probe being called. And if + * virtpci_device_probe is successful + */ + if (ret) { + dev = &virtpcidev->generic_dev; + SPAR_CHANNEL_CLIENT_TRANSITION(addparams->chanptr, + BUS_ID(dev), + CHANNELCLI_DETACHED, NULL); + /* remove virtpcidev, the one we just added, from the list */ + write_lock_irqsave(&vpcidev_list_lock, flags); + for (tmpvpcidev = vpcidev_list_head, prev = NULL; + tmpvpcidev; + prev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) { + if (tmpvpcidev == virtpcidev) { + if (prev) + prev->next = tmpvpcidev->next; + else + vpcidev_list_head = tmpvpcidev->next; + break; + } + } + write_unlock_irqrestore(&vpcidev_list_lock, flags); + kfree(virtpcidev); + return 0; + } + + POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO); + return 1; +} + +static int virtpci_device_serverdown(struct device *parentbus, + int devtype, + struct vhba_wwnn *wwnn, + unsigned char macaddr[]) +{ + int pausethisone = 0; + bool found = false; + struct virtpci_dev *tmpvpcidev, *prevvpcidev; + struct virtpci_driver *vpcidriver; + unsigned long flags; + int rc = 0; + + if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) + return 0; + + /* find the vhba or vnic in virtpci device list */ + write_lock_irqsave(&vpcidev_list_lock, flags); + + for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL; + (tmpvpcidev && !found); + prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) { + if (tmpvpcidev->devtype != devtype) + continue; + + if (devtype == VIRTHBA_TYPE) { + pausethisone = + ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) && + (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2)); + /* devtype is vhba, we're pausing vhba whose + * wwnn matches the current device's wwnn + */ + } else { /* VIRTNIC_TYPE */ + pausethisone = + memcmp(tmpvpcidev->net.mac_addr, macaddr, + MAX_MACADDR_LEN) == 0; + /* devtype is vnic, we're pausing vnic whose + * macaddr matches the current device's macaddr */ + } + + if (!pausethisone) + continue; + + found = true; + vpcidriver = tmpvpcidev->mydriver; + rc = vpcidriver->suspend(tmpvpcidev, 0); + } + write_unlock_irqrestore(&vpcidev_list_lock, flags); + + if (!found) + return 0; + + return rc; +} + +static int virtpci_device_serverup(struct device *parentbus, + int devtype, + struct vhba_wwnn *wwnn, + unsigned char macaddr[]) +{ + int resumethisone = 0; + bool found = false; + struct virtpci_dev *tmpvpcidev, *prevvpcidev; + struct virtpci_driver *vpcidriver; + unsigned long flags; + int rc = 0; + + if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) + return 0; + + + /* find the vhba or vnic in virtpci device list */ + write_lock_irqsave(&vpcidev_list_lock, flags); + + for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL; + (tmpvpcidev && !found); + prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) { + if (tmpvpcidev->devtype != devtype) + continue; + + if (devtype == VIRTHBA_TYPE) { + resumethisone = + ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) && + (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2)); + /* devtype is vhba, we're resuming vhba whose + * wwnn matches the current device's wwnn */ + } else { /* VIRTNIC_TYPE */ + resumethisone = + memcmp(tmpvpcidev->net.mac_addr, macaddr, + MAX_MACADDR_LEN) == 0; + /* devtype is vnic, we're resuming vnic whose + * macaddr matches the current device's macaddr */ + } + + if (!resumethisone) + 
continue; + + found = true; + vpcidriver = tmpvpcidev->mydriver; + /* This should be done at BUS resume time, but an + * existing problem prevents us from ever getting a bus + * resume... This hack would fail to work should we + * ever have a bus that contains NO devices, since we + * would never even get here in that case. + */ + fix_vbus_dev_info(&tmpvpcidev->generic_dev, + tmpvpcidev->device_no, + tmpvpcidev->device, vpcidriver); + rc = vpcidriver->resume(tmpvpcidev); + } + + write_unlock_irqrestore(&vpcidev_list_lock, flags); + + if (!found) + return 0; + + return rc; +} + +static int virtpci_device_del(struct device *parentbus, + int devtype, struct vhba_wwnn *wwnn, + unsigned char macaddr[]) +{ + int count = 0, all = 0, delthisone; + struct virtpci_dev *tmpvpcidev, *prevvpcidev, *dellist = NULL; + unsigned long flags; + +#define DEL_CONTINUE { \ + prevvpcidev = tmpvpcidev;\ + tmpvpcidev = tmpvpcidev->next;\ + continue; \ +} + + if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) + return 0; + + /* see if we are to delete all - NOTE: all implies we have a + * valid parentbus + */ + all = ((devtype == VIRTHBA_TYPE) && (!wwnn)) || + ((devtype == VIRTNIC_TYPE) && (!macaddr)); + + /* find all the vhba or vnic or both in virtpci device list + * keep list of ones we are deleting so we can call + * device_unregister after we release the lock; otherwise we + * encounter "schedule while atomic" + */ + write_lock_irqsave(&vpcidev_list_lock, flags); + for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL; tmpvpcidev;) { + if (tmpvpcidev->devtype != devtype) + DEL_CONTINUE; + + if (all) { + delthisone = + (tmpvpcidev->generic_dev.parent == parentbus); + /* we're deleting all vhbas or vnics on the + * specified parent bus + */ + } else if (devtype == VIRTHBA_TYPE) { + delthisone = + ((tmpvpcidev->scsi.wwnn.wwnn1 == wwnn->wwnn1) && + (tmpvpcidev->scsi.wwnn.wwnn2 == wwnn->wwnn2)); + /* devtype is vhba, we're deleting vhba whose + * wwnn matches the current device's wwnn + */ + } else { /* VIRTNIC_TYPE */ + delthisone = + memcmp(tmpvpcidev->net.mac_addr, macaddr, + MAX_MACADDR_LEN) == 0; + /* devtype is vnic, we're deleting vnic whose + * macaddr matches the current device's macaddr + */ + } + + if (!delthisone) + DEL_CONTINUE; + + /* take vhba/vnic out of the list */ + if (prevvpcidev) + /* not at head */ + prevvpcidev->next = tmpvpcidev->next; + else + vpcidev_list_head = tmpvpcidev->next; + + /* add it to our deletelist */ + tmpvpcidev->next = dellist; + dellist = tmpvpcidev; + + count++; + if (!all) + break; /* done */ + /* going to top of loop again - set tmpvpcidev to next + * one we're to process + */ + if (prevvpcidev) + tmpvpcidev = prevvpcidev->next; + else + tmpvpcidev = vpcidev_list_head; + } + write_unlock_irqrestore(&vpcidev_list_lock, flags); + + if (!all && (count == 0)) + return 0; + + /* now delete each one from delete list */ + while (dellist) { + /* save next */ + tmpvpcidev = dellist->next; + /* delete the vhba/vnic at dellist */ + DELETE_ONE_VPCIDEV(dellist); + /* do next */ + dellist = tmpvpcidev; + } + + return count; +} + +static void virtpci_device_release(struct device *dev_) +{ + /* this function is called when the last reference to the + * device is removed + */ +} + +/*****************************************************/ +/* Driver functions */ +/*****************************************************/ + +#define kobj_to_device_driver(obj) container_of(obj, struct device_driver, kobj) +#define attribute_to_driver_attribute(obj) \ + container_of(obj, struct 
driver_attribute, attr) + +static ssize_t virtpci_driver_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct driver_attribute *dattr = attribute_to_driver_attribute(attr); + ssize_t ret = 0; + + struct driver_private *dprivate = to_driver(kobj); + struct device_driver *driver = dprivate->driver; + + if (dattr->show) + ret = dattr->show(driver, buf); + + return ret; +} + +static ssize_t virtpci_driver_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t count) +{ + struct driver_attribute *dattr = attribute_to_driver_attribute(attr); + ssize_t ret = 0; + + struct driver_private *dprivate = to_driver(kobj); + struct device_driver *driver = dprivate->driver; + + if (dattr->store) + ret = dattr->store(driver, buf, count); + + return ret; +} + +/* register a new virtpci driver */ +int virtpci_register_driver(struct virtpci_driver *drv) +{ + int result = 0; + + if (!drv->id_table) + return 1; + /* initialize core driver fields needed to call driver_register */ + drv->core_driver.name = drv->name; /* name of driver in sysfs */ + drv->core_driver.bus = &virtpci_bus_type; /* type of bus this + * driver works with */ + drv->core_driver.probe = virtpci_device_probe; /* called to query the + * existence of a + * specific device and + * whether this driver + *can work with it */ + drv->core_driver.remove = virtpci_device_remove; /* called when the + * device is removed + * from the system */ + /* register with core */ + result = driver_register(&drv->core_driver); + /* calls bus_add_driver which calls driver_attach and + * module_add_driver + */ + if (result) + return result; /* failed */ + + drv->core_driver.p->kobj.ktype = &virtpci_driver_kobj_type; + + return 0; +} +EXPORT_SYMBOL_GPL(virtpci_register_driver); + +void virtpci_unregister_driver(struct virtpci_driver *drv) +{ + driver_unregister(&drv->core_driver); + /* driver_unregister calls bus_remove_driver + * bus_remove_driver calls device_detach + * device_detach calls device_release_driver for each of the + * driver's devices + * device_release driver calls drv->remove which is + * virtpci_device_remove + * virtpci_device_remove calls virthba_remove + */ +} +EXPORT_SYMBOL_GPL(virtpci_unregister_driver); + +/*****************************************************/ +/* debugfs filesystem functions */ +/*****************************************************/ +struct print_vbus_info { + int *str_pos; + char *buf; + size_t *len; +}; + +static int print_vbus(struct device *vbus, void *data) +{ + struct print_vbus_info *p = (struct print_vbus_info *)data; + + *p->str_pos += scnprintf(p->buf + *p->str_pos, *p->len - *p->str_pos, + "bus_id:%s\n", dev_name(vbus)); + return 0; +} + +static ssize_t info_debugfs_read(struct file *file, char __user *buf, + size_t len, loff_t *offset) +{ + ssize_t bytes_read = 0; + int str_pos = 0; + struct virtpci_dev *tmpvpcidev; + unsigned long flags; + struct print_vbus_info printparam; + char *vbuf; + + if (len > MAX_BUF) + len = MAX_BUF; + vbuf = kzalloc(len, GFP_KERNEL); + if (!vbuf) + return -ENOMEM; + + str_pos += scnprintf(vbuf + str_pos, len - str_pos, + " Virtual PCI Bus devices\n"); + printparam.str_pos = &str_pos; + printparam.buf = vbuf; + printparam.len = &len; + bus_for_each_dev(&virtpci_bus_type, NULL, (void *)&printparam, + print_vbus); + + str_pos += scnprintf(vbuf + str_pos, len - str_pos, + "\n Virtual PCI devices\n"); + read_lock_irqsave(&vpcidev_list_lock, flags); + tmpvpcidev = vpcidev_list_head; + while (tmpvpcidev) { + if 
(tmpvpcidev->devtype == VIRTHBA_TYPE) { + str_pos += scnprintf(vbuf + str_pos, len - str_pos, + "[%d:%d] VHba:%08x:%08x max-config:%d-%d-%d-%d", + tmpvpcidev->bus_no, + tmpvpcidev->device_no, + tmpvpcidev->scsi.wwnn.wwnn1, + tmpvpcidev->scsi.wwnn.wwnn2, + tmpvpcidev->scsi.max.max_channel, + tmpvpcidev->scsi.max.max_id, + tmpvpcidev->scsi.max.max_lun, + tmpvpcidev->scsi.max.cmd_per_lun); + } else { + str_pos += scnprintf(vbuf + str_pos, len - str_pos, + "[%d:%d] VNic:%pM num_rcv_bufs:%d mtu:%d", + tmpvpcidev->bus_no, + tmpvpcidev->device_no, + tmpvpcidev->net.mac_addr, + tmpvpcidev->net.num_rcv_bufs, + tmpvpcidev->net.mtu); + } + str_pos += scnprintf(vbuf + str_pos, + len - str_pos, " chanptr:%p\n", + tmpvpcidev->queueinfo.chan); + tmpvpcidev = tmpvpcidev->next; + } + read_unlock_irqrestore(&vpcidev_list_lock, flags); + + str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n"); + bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos); + kfree(vbuf); + return bytes_read; +} + +/*****************************************************/ +/* Module Init & Exit functions */ +/*****************************************************/ + +static int __init virtpci_mod_init(void) +{ + int ret; + + if (!unisys_spar_platform) + return -ENODEV; + + POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); + + ret = bus_register(&virtpci_bus_type); + /* creates /sys/bus/uisvirtpci which contains devices & + * drivers directory + */ + if (ret) { + POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, ret, + POSTCODE_SEVERITY_ERR); + return ret; + } + bus_device_info_init(&bus_driver_info, "clientbus", "virtpci", + VERSION, NULL); + + /* create a root bus used to parent all the virtpci buses. */ + ret = device_register(&virtpci_rootbus_device); + if (ret) { + bus_unregister(&virtpci_bus_type); + POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, ret, + POSTCODE_SEVERITY_ERR); + return ret; + } + + if (!uisctrl_register_req_handler(2, (void *)&virtpci_ctrlchan_func, + &chipset_driver_info)) { + POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR); + device_unregister(&virtpci_rootbus_device); + bus_unregister(&virtpci_bus_type); + return -1; + } + + /* create debugfs directory and info file inside. */ + virtpci_debugfs_dir = debugfs_create_dir("virtpci", NULL); + debugfs_create_file("info", S_IRUSR, virtpci_debugfs_dir, + NULL, &debugfs_info_fops); + POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO); + return 0; +} + +static void __exit virtpci_mod_exit(void) +{ + /* unregister the callback function */ + device_unregister(&virtpci_rootbus_device); + bus_unregister(&virtpci_bus_type); + debugfs_remove_recursive(virtpci_debugfs_dir); +} + +module_init(virtpci_mod_init); +module_exit(virtpci_mod_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Usha Srinivasan"); +MODULE_ALIAS("uisvirtpci"); + diff --git a/kernel/drivers/staging/unisys/virtpci/virtpci.h b/kernel/drivers/staging/unisys/virtpci/virtpci.h new file mode 100644 index 000000000..9d85f55e8 --- /dev/null +++ b/kernel/drivers/staging/unisys/virtpci/virtpci.h @@ -0,0 +1,103 @@ +/* virtpci.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * Unisys Virtual PCI driver header + */ + +#ifndef __VIRTPCI_H__ +#define __VIRTPCI_H__ + +#include "uisqueue.h" +#include +#include + +#define PCI_DEVICE_ID_VIRTHBA 0xAA00 +#define PCI_DEVICE_ID_VIRTNIC 0xAB00 + +struct scsi_adap_info { + void *scsihost; /* scsi host if this device is a scsi hba */ + struct vhba_wwnn wwnn; /* the world wide node name of vhba */ + struct vhba_config_max max; /* various max specifications used + * to config vhba */ +}; + +struct net_adap_info { + struct net_device *netdev; /* network device if this + * device is a NIC */ + u8 mac_addr[MAX_MACADDR_LEN]; + int num_rcv_bufs; + unsigned mtu; + uuid_le zone_uuid; +}; + +enum virtpci_dev_type { + VIRTHBA_TYPE = 0, + VIRTNIC_TYPE = 1, + VIRTBUS_TYPE = 6, +}; + +struct virtpci_dev { + enum virtpci_dev_type devtype; /* indicates type of the + * virtual pci device */ + struct virtpci_driver *mydriver; /* which driver has allocated + * this device */ + unsigned short vendor; /* vendor id for device */ + unsigned short device; /* device id for device */ + u32 bus_no; /* number of bus on which device exists */ + u32 device_no; /* device's number on the bus */ + struct irq_info intr; /* interrupt info */ + struct device generic_dev; /* generic device */ + union { + struct scsi_adap_info scsi; + struct net_adap_info net; + }; + + struct uisqueue_info queueinfo; /* holds ptr to channel where cmds & + * rsps are queued & retrieved */ + struct virtpci_dev *next; /* points to next virtpci device */ +}; + +struct virtpci_driver { + struct list_head node; + const char *name; /* the name of the driver in sysfs */ + const char *version; + const char *vertag; + const struct pci_device_id *id_table; /* must be non-NULL for probe + * to be called */ + int (*probe)(struct virtpci_dev *dev, + const struct pci_device_id *id); /* device inserted */ + void (*remove)(struct virtpci_dev *dev); /* Device removed (NULL if + * not a hot-plug capable + * driver) */ + int (*suspend)(struct virtpci_dev *dev, + u32 state); /* Device suspended */ + int (*resume)(struct virtpci_dev *dev); /* Device woken up */ + int (*enable_wake)(struct virtpci_dev *dev, + u32 state, int enable); /* Enable wake event */ + struct device_driver core_driver; /* VIRTPCI core fills this in */ +}; + +#define driver_to_virtpci_driver(in_drv) \ + container_of(in_drv, struct virtpci_driver, core_driver) +#define device_to_virtpci_dev(in_dev) \ + container_of(in_dev, struct virtpci_dev, generic_dev) + +int virtpci_register_driver(struct virtpci_driver *); +void virtpci_unregister_driver(struct virtpci_driver *); + +#endif /* __VIRTPCI_H__ */ diff --git a/kernel/drivers/staging/unisys/visorchannel/Kconfig b/kernel/drivers/staging/unisys/visorchannel/Kconfig new file mode 100644 index 000000000..8d31bebf0 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchannel/Kconfig @@ -0,0 +1,10 @@ +# +# Unisys visorchannel configuration +# + +config UNISYS_VISORCHANNEL + tristate "Unisys visorchannel driver" + select UNISYS_VISORUTIL + ---help--- + If you say Y here, you will enable the Unisys visorchannel driver. 
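The struct virtpci_driver interface declared in virtpci.h above is what client drivers such as virthba fill in before calling virtpci_register_driver(). The following is only an illustrative sketch of that registration pattern, not code from this patch; the "mydrv" names, the probe/remove stubs, and the id-table contents are placeholders (PCI_DEVICE_ID_VIRTHBA comes from virtpci.h, PCI_VENDOR_ID_UNISYS from pci_ids.h):

	/* sketch only; assumes "virtpci.h" above is included */
	static int mydrv_probe(struct virtpci_dev *dev,
			       const struct pci_device_id *id)
	{
		/* set up queues/threads for dev->queueinfo.chan here */
		return 0;
	}

	static void mydrv_remove(struct virtpci_dev *dev)
	{
	}

	static const struct pci_device_id mydrv_id_table[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA) },
		{ 0 },
	};

	static struct virtpci_driver mydrv_virtpci_driver = {
		.name		= "mydrv",
		.version	= "01.00",
		.vertag		= NULL,
		.id_table	= mydrv_id_table,	/* must be non-NULL */
		.probe		= mydrv_probe,
		.remove		= mydrv_remove,
	};

	static int __init mydrv_init(void)
	{
		/* attaches core_driver to the "uisvirtpci" bus type */
		return virtpci_register_driver(&mydrv_virtpci_driver);
	}

	static void __exit mydrv_exit(void)
	{
		virtpci_unregister_driver(&mydrv_virtpci_driver);
	}

Once registered, virtpci_bus_match() compares each virtpci_dev on the bus against the driver's id table, and a successful match leads to virtpci_device_probe() calling the driver's probe callback, as described in the bus code above.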
+ diff --git a/kernel/drivers/staging/unisys/visorchannel/Makefile b/kernel/drivers/staging/unisys/visorchannel/Makefile new file mode 100644 index 000000000..e079c96b1 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchannel/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for Unisys visorchannel +# + +obj-$(CONFIG_UNISYS_VISORCHANNEL) += visorchannel.o + +visorchannel-y := visorchannel_main.o visorchannel_funcs.o + +ccflags-y += -Idrivers/staging/unisys/include +ccflags-y += -Idrivers/staging/unisys/common-spar/include +ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels +ccflags-y += -Idrivers/staging/unisys/visorutil diff --git a/kernel/drivers/staging/unisys/visorchannel/globals.h b/kernel/drivers/staging/unisys/visorchannel/globals.h new file mode 100644 index 000000000..0ed8e1d80 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchannel/globals.h @@ -0,0 +1,27 @@ +/* globals.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __VISORCHANNEL_GLOBALS_H__ +#define __VISORCHANNEL_GLOBALS_H__ + +#include "timskmod.h" +#include "memregion.h" +#include "version.h" + +#define MYDRVNAME "visorchannel" + +#endif diff --git a/kernel/drivers/staging/unisys/visorchannel/visorchannel.h b/kernel/drivers/staging/unisys/visorchannel/visorchannel.h new file mode 100644 index 000000000..63f1b9760 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchannel/visorchannel.h @@ -0,0 +1,76 @@ +/* visorchannel.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __VISORCHANNEL_H__ +#define __VISORCHANNEL_H__ + +#include + +#include "memregion.h" +#include "channel.h" +#ifndef HOSTADDRESS +#define HOSTADDRESS u64 +#endif +#ifndef BOOL +#define BOOL int +#endif + +/* Note that for visorchannel_create() and visorchannel_create_overlapped(), + * and arguments may be 0 if we are a channel CLIENT. + * In this case, the values can simply be read from the channel header. 
+ */ +struct visorchannel *visorchannel_create(HOSTADDRESS physaddr, + ulong channel_bytes, uuid_le guid); +struct visorchannel *visorchannel_create_overlapped(ulong channel_bytes, + struct visorchannel *parent, + ulong off, uuid_le guid); +struct visorchannel *visorchannel_create_with_lock(HOSTADDRESS physaddr, + ulong channel_bytes, + uuid_le guid); +struct visorchannel *visorchannel_create_overlapped_with_lock( + ulong channel_bytes, + struct visorchannel *parent, + ulong off, uuid_le guid); +void visorchannel_destroy(struct visorchannel *channel); +int visorchannel_read(struct visorchannel *channel, ulong offset, + void *local, ulong nbytes); +int visorchannel_write(struct visorchannel *channel, ulong offset, + void *local, ulong nbytes); +int visorchannel_clear(struct visorchannel *channel, ulong offset, + u8 ch, ulong nbytes); +BOOL visorchannel_signalremove(struct visorchannel *channel, u32 queue, + void *msg); +BOOL visorchannel_signalinsert(struct visorchannel *channel, u32 queue, + void *msg); +int visorchannel_signalqueue_slots_avail(struct visorchannel *channel, + u32 queue); +int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue); +HOSTADDRESS visorchannel_get_physaddr(struct visorchannel *channel); +ulong visorchannel_get_nbytes(struct visorchannel *channel); +char *visorchannel_id(struct visorchannel *channel, char *s); +char *visorchannel_zoneid(struct visorchannel *channel, char *s); +u64 visorchannel_get_clientpartition(struct visorchannel *channel); +uuid_le visorchannel_get_uuid(struct visorchannel *channel); +struct memregion *visorchannel_get_memregion(struct visorchannel *channel); +char *visorchannel_uuid_id(uuid_le *guid, char *s); +void visorchannel_debug(struct visorchannel *channel, int num_queues, + struct seq_file *seq, u32 off); +void visorchannel_dump_section(struct visorchannel *chan, char *s, + int off, int len, struct seq_file *seq); +void __iomem *visorchannel_get_header(struct visorchannel *channel); + +#endif diff --git a/kernel/drivers/staging/unisys/visorchannel/visorchannel_funcs.c b/kernel/drivers/staging/unisys/visorchannel/visorchannel_funcs.c new file mode 100644 index 000000000..7a9a7242f --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchannel/visorchannel_funcs.c @@ -0,0 +1,665 @@ +/* visorchannel_funcs.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * This provides Supervisor channel communication primitives, which are + * independent of the mechanism used to access the channel data. All channel + * data is accessed using the memregion abstraction. (memregion has both + * a CM2 implementation and a direct memory implementation.) 
+ */ + +#include "globals.h" +#include "visorchannel.h" +#include + +#define MYDRVNAME "visorchannel" + +struct visorchannel { + struct memregion *memregion; /* from visor_memregion_create() */ + struct channel_header chan_hdr; + uuid_le guid; + ulong size; + BOOL needs_lock; /* channel creator knows if more than one + * thread will be inserting or removing */ + spinlock_t insert_lock; /* protect head writes in chan_hdr */ + spinlock_t remove_lock; /* protect tail writes in chan_hdr */ + + struct { + struct signal_queue_header req_queue; + struct signal_queue_header rsp_queue; + struct signal_queue_header event_queue; + struct signal_queue_header ack_queue; + } safe_uis_queue; +}; + +/* Creates the struct visorchannel abstraction for a data area in memory, + * but does NOT modify this data area. + */ +static struct visorchannel * +visorchannel_create_guts(HOSTADDRESS physaddr, ulong channel_bytes, + struct visorchannel *parent, ulong off, uuid_le guid, + BOOL needs_lock) +{ + struct visorchannel *p = NULL; + void *rc = NULL; + + p = kmalloc(sizeof(*p), GFP_KERNEL|__GFP_NORETRY); + if (!p) { + rc = NULL; + goto cleanup; + } + p->memregion = NULL; + p->needs_lock = needs_lock; + spin_lock_init(&p->insert_lock); + spin_lock_init(&p->remove_lock); + + /* prepare chan_hdr (abstraction to read/write channel memory) */ + if (!parent) + p->memregion = + visor_memregion_create(physaddr, + sizeof(struct channel_header)); + else + p->memregion = + visor_memregion_create_overlapped(parent->memregion, + off, sizeof(struct channel_header)); + if (!p->memregion) { + rc = NULL; + goto cleanup; + } + if (visor_memregion_read(p->memregion, 0, &p->chan_hdr, + sizeof(struct channel_header)) < 0) { + rc = NULL; + goto cleanup; + } + if (channel_bytes == 0) + /* we had better be a CLIENT of this channel */ + channel_bytes = (ulong)p->chan_hdr.size; + if (uuid_le_cmp(guid, NULL_UUID_LE) == 0) + /* we had better be a CLIENT of this channel */ + guid = p->chan_hdr.chtype; + if (visor_memregion_resize(p->memregion, channel_bytes) < 0) { + rc = NULL; + goto cleanup; + } + p->size = channel_bytes; + p->guid = guid; + + rc = p; +cleanup: + + if (!rc) { + if (!p) { + visorchannel_destroy(p); + p = NULL; + } + } + return rc; +} + +struct visorchannel * +visorchannel_create(HOSTADDRESS physaddr, ulong channel_bytes, uuid_le guid) +{ + return visorchannel_create_guts(physaddr, channel_bytes, NULL, 0, guid, + FALSE); +} +EXPORT_SYMBOL_GPL(visorchannel_create); + +struct visorchannel * +visorchannel_create_with_lock(HOSTADDRESS physaddr, ulong channel_bytes, + uuid_le guid) +{ + return visorchannel_create_guts(physaddr, channel_bytes, NULL, 0, guid, + TRUE); +} +EXPORT_SYMBOL_GPL(visorchannel_create_with_lock); + +struct visorchannel * +visorchannel_create_overlapped(ulong channel_bytes, + struct visorchannel *parent, ulong off, + uuid_le guid) +{ + return visorchannel_create_guts(0, channel_bytes, parent, off, guid, + FALSE); +} +EXPORT_SYMBOL_GPL(visorchannel_create_overlapped); + +struct visorchannel * +visorchannel_create_overlapped_with_lock(ulong channel_bytes, + struct visorchannel *parent, ulong off, + uuid_le guid) +{ + return visorchannel_create_guts(0, channel_bytes, parent, off, guid, + TRUE); +} +EXPORT_SYMBOL_GPL(visorchannel_create_overlapped_with_lock); + +void +visorchannel_destroy(struct visorchannel *channel) +{ + if (!channel) + return; + if (channel->memregion) { + visor_memregion_destroy(channel->memregion); + channel->memregion = NULL; + } + kfree(channel); +} +EXPORT_SYMBOL_GPL(visorchannel_destroy); 
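As the comment in visorchannel.h above notes, a channel CLIENT may pass a channel_bytes of 0 and a NULL GUID, in which case visorchannel_create_guts() reads both values from the channel header. A minimal client-side sketch of that pattern, assuming only a stand-in physaddr value, might look like:

	struct visorchannel *chan;

	/* size 0 and NULL_UUID_LE: both are taken from the channel header */
	chan = visorchannel_create(physaddr, 0, NULL_UUID_LE);
	if (!chan)
		return -ENOMEM;

	/* size was filled in from the header read during create */
	pr_info("channel is %lu bytes\n", visorchannel_get_nbytes(chan));

	visorchannel_destroy(chan);

This is illustrative only; the locking variants (visorchannel_create_with_lock() and the overlapped forms) follow the same shape and differ only in the needs_lock and parent/offset arguments passed to visorchannel_create_guts().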
+ +HOSTADDRESS +visorchannel_get_physaddr(struct visorchannel *channel) +{ + return visor_memregion_get_physaddr(channel->memregion); +} +EXPORT_SYMBOL_GPL(visorchannel_get_physaddr); + +ulong +visorchannel_get_nbytes(struct visorchannel *channel) +{ + return channel->size; +} +EXPORT_SYMBOL_GPL(visorchannel_get_nbytes); + +char * +visorchannel_uuid_id(uuid_le *guid, char *s) +{ + sprintf(s, "%pUL", guid); + return s; +} +EXPORT_SYMBOL_GPL(visorchannel_uuid_id); + +char * +visorchannel_id(struct visorchannel *channel, char *s) +{ + return visorchannel_uuid_id(&channel->guid, s); +} +EXPORT_SYMBOL_GPL(visorchannel_id); + +char * +visorchannel_zoneid(struct visorchannel *channel, char *s) +{ + return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s); +} +EXPORT_SYMBOL_GPL(visorchannel_zoneid); + +HOSTADDRESS +visorchannel_get_clientpartition(struct visorchannel *channel) +{ + return channel->chan_hdr.partition_handle; +} +EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition); + +uuid_le +visorchannel_get_uuid(struct visorchannel *channel) +{ + return channel->guid; +} +EXPORT_SYMBOL_GPL(visorchannel_get_uuid); + +struct memregion * +visorchannel_get_memregion(struct visorchannel *channel) +{ + return channel->memregion; +} +EXPORT_SYMBOL_GPL(visorchannel_get_memregion); + +int +visorchannel_read(struct visorchannel *channel, ulong offset, + void *local, ulong nbytes) +{ + int rc = visor_memregion_read(channel->memregion, offset, + local, nbytes); + if ((rc >= 0) && (offset == 0) && + (nbytes >= sizeof(struct channel_header))) { + memcpy(&channel->chan_hdr, local, + sizeof(struct channel_header)); + } + return rc; +} +EXPORT_SYMBOL_GPL(visorchannel_read); + +int +visorchannel_write(struct visorchannel *channel, ulong offset, + void *local, ulong nbytes) +{ + if (offset == 0 && nbytes >= sizeof(struct channel_header)) + memcpy(&channel->chan_hdr, local, + sizeof(struct channel_header)); + return visor_memregion_write(channel->memregion, offset, local, nbytes); +} +EXPORT_SYMBOL_GPL(visorchannel_write); + +int +visorchannel_clear(struct visorchannel *channel, ulong offset, u8 ch, + ulong nbytes) +{ + int rc = -1; + int bufsize = 65536; + int written = 0; + u8 *buf = vmalloc(bufsize); + + if (!buf) + goto cleanup; + + memset(buf, ch, bufsize); + while (nbytes > 0) { + ulong thisbytes = bufsize; + int x = -1; + + if (nbytes < thisbytes) + thisbytes = nbytes; + x = visor_memregion_write(channel->memregion, offset + written, + buf, thisbytes); + if (x < 0) { + rc = x; + goto cleanup; + } + written += thisbytes; + nbytes -= thisbytes; + } + rc = 0; + +cleanup: + if (buf) { + vfree(buf); + buf = NULL; + } + return rc; +} +EXPORT_SYMBOL_GPL(visorchannel_clear); + +void __iomem * +visorchannel_get_header(struct visorchannel *channel) +{ + return (void __iomem *)&channel->chan_hdr; +} +EXPORT_SYMBOL_GPL(visorchannel_get_header); + +/** Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a + * channel header + */ +#define SIG_QUEUE_OFFSET(chan_hdr, q) \ + ((chan_hdr)->ch_space_offset + \ + ((q) * sizeof(struct signal_queue_header))) + +/** Return offset of a specific queue entry (data) from the beginning of a + * channel header + */ +#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \ + (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \ + ((slot) * (sig_hdr)->signal_size)) + +/** Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back + * into host memory + */ +#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \ + 
(visor_memregion_write(channel->memregion, \ + SIG_QUEUE_OFFSET(&channel->chan_hdr, queue)+ \ + offsetof(struct signal_queue_header, FIELD),\ + &((sig_hdr)->FIELD), \ + sizeof((sig_hdr)->FIELD)) >= 0) + +static BOOL +sig_read_header(struct visorchannel *channel, u32 queue, + struct signal_queue_header *sig_hdr) +{ + BOOL rc = FALSE; + + if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header)) + goto cleanup; + + /* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */ + + if (visor_memregion_read(channel->memregion, + SIG_QUEUE_OFFSET(&channel->chan_hdr, queue), + sig_hdr, + sizeof(struct signal_queue_header)) < 0) { + goto cleanup; + } + rc = TRUE; +cleanup: + return rc; +} + +static BOOL +sig_do_data(struct visorchannel *channel, u32 queue, + struct signal_queue_header *sig_hdr, u32 slot, void *data, + BOOL is_write) +{ + BOOL rc = FALSE; + int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue, + sig_hdr, slot); + if (is_write) { + if (visor_memregion_write(channel->memregion, + signal_data_offset, + data, sig_hdr->signal_size) < 0) { + goto cleanup; + } + } else { + if (visor_memregion_read(channel->memregion, signal_data_offset, + data, sig_hdr->signal_size) < 0) { + goto cleanup; + } + } + rc = TRUE; +cleanup: + return rc; +} + +static inline BOOL +sig_read_data(struct visorchannel *channel, u32 queue, + struct signal_queue_header *sig_hdr, u32 slot, void *data) +{ + return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE); +} + +static inline BOOL +sig_write_data(struct visorchannel *channel, u32 queue, + struct signal_queue_header *sig_hdr, u32 slot, void *data) +{ + return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE); +} + +static inline unsigned char +safe_sig_queue_validate(struct signal_queue_header *psafe_sqh, + struct signal_queue_header *punsafe_sqh, + u32 *phead, u32 *ptail) +{ + if ((*phead >= psafe_sqh->max_slots) || + (*ptail >= psafe_sqh->max_slots)) { + /* Choose 0 or max, maybe based on current tail value */ + *phead = 0; + *ptail = 0; + + /* Sync with client as necessary */ + punsafe_sqh->head = *phead; + punsafe_sqh->tail = *ptail; + + return 0; + } + return 1; +} /* end safe_sig_queue_validate */ + +static BOOL +signalremove_inner(struct visorchannel *channel, u32 queue, void *msg) +{ + struct signal_queue_header sig_hdr; + + if (!sig_read_header(channel, queue, &sig_hdr)) + return FALSE; + if (sig_hdr.head == sig_hdr.tail) + return FALSE; /* no signals to remove */ + + sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots; + if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg)) { + return FALSE; + } + sig_hdr.num_received++; + + /* For each data field in SIGNAL_QUEUE_HEADER that was modified, + * update host memory. 
+ */ + mb(); /* required for channel synch */ + if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail)) + return FALSE; + if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received)) + return FALSE; + return TRUE; +} + +BOOL +visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg) +{ + BOOL rc; + + if (channel->needs_lock) { + spin_lock(&channel->remove_lock); + rc = signalremove_inner(channel, queue, msg); + spin_unlock(&channel->remove_lock); + } else { + rc = signalremove_inner(channel, queue, msg); + } + + return rc; +} +EXPORT_SYMBOL_GPL(visorchannel_signalremove); + +static BOOL +signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg) +{ + struct signal_queue_header sig_hdr; + + if (!sig_read_header(channel, queue, &sig_hdr)) + return FALSE; + + sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots); + if (sig_hdr.head == sig_hdr.tail) { + sig_hdr.num_overflows++; + visor_memregion_write(channel->memregion, + SIG_QUEUE_OFFSET(&channel->chan_hdr, + queue) + + offsetof(struct signal_queue_header, + num_overflows), + &(sig_hdr.num_overflows), + sizeof(sig_hdr.num_overflows)); + return FALSE; + } + + if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg)) + return FALSE; + + sig_hdr.num_sent++; + + /* For each data field in SIGNAL_QUEUE_HEADER that was modified, + * update host memory. + */ + mb(); /* required for channel synch */ + if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head)) + return FALSE; + if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent)) { + return FALSE; + } + + return TRUE; +} + +BOOL +visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg) +{ + BOOL rc; + + if (channel->needs_lock) { + spin_lock(&channel->insert_lock); + rc = signalinsert_inner(channel, queue, msg); + spin_unlock(&channel->insert_lock); + } else { + rc = signalinsert_inner(channel, queue, msg); + } + + return rc; +} +EXPORT_SYMBOL_GPL(visorchannel_signalinsert); + +int +visorchannel_signalqueue_slots_avail(struct visorchannel *channel, u32 queue) +{ + struct signal_queue_header sig_hdr; + u32 slots_avail, slots_used; + u32 head, tail; + + if (!sig_read_header(channel, queue, &sig_hdr)) + return 0; + head = sig_hdr.head; + tail = sig_hdr.tail; + if (head < tail) + head = head + sig_hdr.max_slots; + slots_used = (head - tail); + slots_avail = sig_hdr.max_signals - slots_used; + return (int)slots_avail; +} +EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail); + +int +visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue) +{ + struct signal_queue_header sig_hdr; + + if (!sig_read_header(channel, queue, &sig_hdr)) + return 0; + return (int)sig_hdr.max_signals; +} +EXPORT_SYMBOL_GPL(visorchannel_signalqueue_max_slots); + +static void +sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq) +{ + seq_printf(seq, "Signal Queue #%d\n", which); + seq_printf(seq, " VersionId = %lu\n", (ulong)q->version); + seq_printf(seq, " Type = %lu\n", (ulong)q->chtype); + seq_printf(seq, " oSignalBase = %llu\n", + (long long)q->sig_base_offset); + seq_printf(seq, " SignalSize = %lu\n", (ulong)q->signal_size); + seq_printf(seq, " MaxSignalSlots = %lu\n", + (ulong)q->max_slots); + seq_printf(seq, " MaxSignals = %lu\n", (ulong)q->max_signals); + seq_printf(seq, " FeatureFlags = %-16.16Lx\n", + (long long)q->features); + seq_printf(seq, " NumSignalsSent = %llu\n", + (long long)q->num_sent); + seq_printf(seq, " NumSignalsReceived = %llu\n", + (long long)q->num_received); + seq_printf(seq, " NumOverflows = 
%llu\n", + (long long)q->num_overflows); + seq_printf(seq, " Head = %lu\n", (ulong)q->head); + seq_printf(seq, " Tail = %lu\n", (ulong)q->tail); +} + +void +visorchannel_debug(struct visorchannel *channel, int num_queues, + struct seq_file *seq, u32 off) +{ + HOSTADDRESS addr = 0; + ulong nbytes = 0, nbytes_region = 0; + struct memregion *memregion = NULL; + struct channel_header hdr; + struct channel_header *phdr = &hdr; + int i = 0; + int errcode = 0; + + if (!channel) + return; + memregion = channel->memregion; + if (!memregion) + return; + + addr = visor_memregion_get_physaddr(memregion); + nbytes_region = visor_memregion_get_nbytes(memregion); + errcode = visorchannel_read(channel, off, + phdr, sizeof(struct channel_header)); + if (errcode < 0) { + seq_printf(seq, + "Read of channel header failed with errcode=%d)\n", + errcode); + if (off == 0) { + phdr = &channel->chan_hdr; + seq_puts(seq, "(following data may be stale)\n"); + } else { + return; + } + } + nbytes = (ulong)(phdr->size); + seq_printf(seq, "--- Begin channel @0x%-16.16Lx for 0x%lx bytes (region=0x%lx bytes) ---\n", + addr + off, nbytes, nbytes_region); + seq_printf(seq, "Type = %pUL\n", &phdr->chtype); + seq_printf(seq, "ZoneGuid = %pUL\n", &phdr->zone_uuid); + seq_printf(seq, "Signature = 0x%-16.16Lx\n", + (long long)phdr->signature); + seq_printf(seq, "LegacyState = %lu\n", (ulong)phdr->legacy_state); + seq_printf(seq, "SrvState = %lu\n", (ulong)phdr->srv_state); + seq_printf(seq, "CliStateBoot = %lu\n", (ulong)phdr->cli_state_boot); + seq_printf(seq, "CliStateOS = %lu\n", (ulong)phdr->cli_state_os); + seq_printf(seq, "HeaderSize = %lu\n", (ulong)phdr->header_size); + seq_printf(seq, "Size = %llu\n", (long long)phdr->size); + seq_printf(seq, "Features = 0x%-16.16llx\n", + (long long)phdr->features); + seq_printf(seq, "PartitionHandle = 0x%-16.16llx\n", + (long long)phdr->partition_handle); + seq_printf(seq, "Handle = 0x%-16.16llx\n", + (long long)phdr->handle); + seq_printf(seq, "VersionId = %lu\n", (ulong)phdr->version_id); + seq_printf(seq, "oChannelSpace = %llu\n", + (long long)phdr->ch_space_offset); + if ((phdr->ch_space_offset == 0) || (errcode < 0)) + ; + else + for (i = 0; i < num_queues; i++) { + struct signal_queue_header q; + + errcode = visorchannel_read(channel, + off + + phdr->ch_space_offset + + (i * sizeof(q)), + &q, sizeof(q)); + if (errcode < 0) { + seq_printf(seq, + "failed to read signal queue #%d from channel @0x%-16.16Lx errcode=%d\n", + i, addr, errcode); + continue; + } + sigqueue_debug(&q, i, seq); + } + seq_printf(seq, "--- End channel @0x%-16.16Lx for 0x%lx bytes ---\n", + addr + off, nbytes); +} +EXPORT_SYMBOL_GPL(visorchannel_debug); + +void +visorchannel_dump_section(struct visorchannel *chan, char *s, + int off, int len, struct seq_file *seq) +{ + char *buf, *tbuf, *fmtbuf; + int fmtbufsize = 0; + int i; + int errcode = 0; + + fmtbufsize = 100 * COVQ(len, 16); + buf = kmalloc(len, GFP_KERNEL|__GFP_NORETRY); + if (!buf) + return; + fmtbuf = kmalloc(fmtbufsize, GFP_KERNEL|__GFP_NORETRY); + if (!fmtbuf) + goto fmt_failed; + + errcode = visorchannel_read(chan, off, buf, len); + if (errcode < 0) + goto read_failed; + seq_printf(seq, "channel %s:\n", s); + tbuf = buf; + while (len > 0) { + i = (len < 16) ? 
len : 16; + hex_dump_to_buffer(tbuf, i, 16, 1, fmtbuf, fmtbufsize, TRUE); + seq_printf(seq, "%s\n", fmtbuf); + tbuf += 16; + len -= 16; + } + +read_failed: + kfree(fmtbuf); +fmt_failed: + kfree(buf); +} +EXPORT_SYMBOL_GPL(visorchannel_dump_section); diff --git a/kernel/drivers/staging/unisys/visorchannel/visorchannel_main.c b/kernel/drivers/staging/unisys/visorchannel/visorchannel_main.c new file mode 100644 index 000000000..787d4774b --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchannel/visorchannel_main.c @@ -0,0 +1,50 @@ +/* visorchannel_main.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * This is a module "wrapper" around visorchannel_funcs. + */ + +#include "globals.h" +#include "channel.h" +#include "visorchannel.h" +#include + +#define MYDRVNAME "visorchannel" + +static int __init +visorchannel_init(void) +{ + if (!unisys_spar_platform) + return -ENODEV; + + return 0; +} + +static void +visorchannel_exit(void) +{ +} + +module_init(visorchannel_init); +module_exit(visorchannel_exit); + +MODULE_AUTHOR("Unisys"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Supervisor channel driver for service partition: ver " + VERSION); +MODULE_VERSION(VERSION); diff --git a/kernel/drivers/staging/unisys/visorchipset/Kconfig b/kernel/drivers/staging/unisys/visorchipset/Kconfig new file mode 100644 index 000000000..b03bfc5c3 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/Kconfig @@ -0,0 +1,11 @@ +# +# Unisys visorchipset configuration +# + +config UNISYS_VISORCHIPSET + tristate "Unisys visorchipset driver" + select UNISYS_VISORUTIL + select UNISYS_VISORCHANNEL + ---help--- + If you say Y here, you will enable the Unisys visorchipset driver. + diff --git a/kernel/drivers/staging/unisys/visorchipset/Makefile b/kernel/drivers/staging/unisys/visorchipset/Makefile new file mode 100644 index 000000000..12686906b --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for Unisys visorchipset +# + +obj-$(CONFIG_UNISYS_VISORCHIPSET) += visorchipset.o + +visorchipset-y := visorchipset_main.o file.o parser.o + +ccflags-y += -Idrivers/staging/unisys/include +ccflags-y += -Idrivers/staging/unisys/uislib +ccflags-y += -Idrivers/staging/unisys/visorchannel +ccflags-y += -Idrivers/staging/unisys/common-spar/include +ccflags-y += -Idrivers/staging/unisys/common-spar/include/channels +ccflags-y += -Idrivers/staging/unisys/visorutil +ccflags-y += -Iinclude/generated diff --git a/kernel/drivers/staging/unisys/visorchipset/file.c b/kernel/drivers/staging/unisys/visorchipset/file.c new file mode 100644 index 000000000..203de0b5f --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/file.c @@ -0,0 +1,160 @@ +/* file.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* This contains the implementation that allows a usermode program to + * communicate with the visorchipset driver using a device/file interface. + */ + +#include "globals.h" +#include "visorchannel.h" +#include +#include +#include "uisutils.h" +#include "file.h" + +#define CURRENT_FILE_PC VISOR_CHIPSET_PC_file_c + +static struct cdev file_cdev; +static struct visorchannel **file_controlvm_channel; + +void +visorchipset_file_cleanup(dev_t major_dev) +{ + if (file_cdev.ops != NULL) + cdev_del(&file_cdev); + file_cdev.ops = NULL; + unregister_chrdev_region(major_dev, 1); +} + +static int +visorchipset_open(struct inode *inode, struct file *file) +{ + unsigned minor_number = iminor(inode); + + if (minor_number != 0) + return -ENODEV; + file->private_data = NULL; + return 0; +} + +static int +visorchipset_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static int +visorchipset_mmap(struct file *file, struct vm_area_struct *vma) +{ + ulong physaddr = 0; + ulong offset = vma->vm_pgoff << PAGE_SHIFT; + GUEST_PHYSICAL_ADDRESS addr = 0; + + /* sv_enable_dfp(); */ + if (offset & (PAGE_SIZE - 1)) + return -ENXIO; /* need aligned offsets */ + + switch (offset) { + case VISORCHIPSET_MMAP_CONTROLCHANOFFSET: + vma->vm_flags |= VM_IO; + if (*file_controlvm_channel == NULL) { + return -ENXIO; + } + visorchannel_read(*file_controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + gp_control_channel), + &addr, sizeof(addr)); + if (addr == 0) { + return -ENXIO; + } + physaddr = (ulong)addr; + if (remap_pfn_range(vma, vma->vm_start, + physaddr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + /*pgprot_noncached */ + (vma->vm_page_prot))) { + return -EAGAIN; + } + break; + default: + return -ENOSYS; + } + return 0; +} + +static long visorchipset_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + s64 adjustment; + s64 vrtc_offset; + + switch (cmd) { + case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET: + /* get the physical rtc offset */ + vrtc_offset = issue_vmcall_query_guest_virtual_time_offset(); + if (copy_to_user + ((void __user *)arg, &vrtc_offset, sizeof(vrtc_offset))) { + return -EFAULT; + } + return SUCCESS; + case VMCALL_UPDATE_PHYSICAL_TIME: + if (copy_from_user + (&adjustment, (void __user *)arg, sizeof(adjustment))) { + return -EFAULT; + } + return issue_vmcall_update_physical_time(adjustment); + default: + return -EFAULT; + } +} + +static const struct file_operations visorchipset_fops = { + .owner = THIS_MODULE, + .open = visorchipset_open, + .read = NULL, + .write = NULL, + .unlocked_ioctl = visorchipset_ioctl, + .release = visorchipset_release, + .mmap = visorchipset_mmap, +}; + +int +visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel) +{ + int rc = 0; + + file_controlvm_channel = controlvm_channel; + cdev_init(&file_cdev, &visorchipset_fops); + file_cdev.owner = THIS_MODULE; + if (MAJOR(major_dev) == 0) { + rc = alloc_chrdev_region(&major_dev, 0, 1, MYDRVNAME); + /* dynamic 
major device number registration required */ + if (rc < 0) + return rc; + } else { + /* static major device number registration required */ + rc = register_chrdev_region(major_dev, 1, MYDRVNAME); + if (rc < 0) + return rc; + } + rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1); + if (rc < 0) { + unregister_chrdev_region(major_dev, 1); + return rc; + } + return 0; +} diff --git a/kernel/drivers/staging/unisys/visorchipset/file.h b/kernel/drivers/staging/unisys/visorchipset/file.h new file mode 100644 index 000000000..51f7699b7 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/file.h @@ -0,0 +1,27 @@ +/* file.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __FILE_H__ +#define __FILE_H__ + +#include "globals.h" + +int visorchipset_file_init(dev_t majorDev, + struct visorchannel **pControlVm_channel); +void visorchipset_file_cleanup(dev_t major_dev); + +#endif diff --git a/kernel/drivers/staging/unisys/visorchipset/globals.h b/kernel/drivers/staging/unisys/visorchipset/globals.h new file mode 100644 index 000000000..f76e498a3 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/globals.h @@ -0,0 +1,42 @@ +/* globals.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __VISORCHIPSET_GLOBALS_H__ +#define __VISORCHIPSET_GLOBALS_H__ + +#include "diagnostics/appos_subsystems.h" +#include "timskmod.h" +#include "visorchipset.h" +#include "visorchipset_umode.h" +#include "version.h" + +#define MYDRVNAME "visorchipset" + +/* module parameters */ + +extern int visorchipset_testvnic; +extern int visorchipset_testvnicclient; +extern int visorchipset_testmsg; +extern int visorchipset_major; +extern int visorchipset_serverregwait; +extern int visorchipset_clientregwait; +extern int visorchipset_testteardown; +extern int visorchipset_disable_controlvm; +extern int visorchipset_crash_kernel; +extern int visorchipset_holdchipsetready; + +#endif diff --git a/kernel/drivers/staging/unisys/visorchipset/parser.c b/kernel/drivers/staging/unisys/visorchipset/parser.c new file mode 100644 index 000000000..d8a2d6f5a --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/parser.c @@ -0,0 +1,430 @@ +/* parser.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#include "parser.h" +#include "memregion.h" +#include "controlvmchannel.h" +#include +#include +#include + +#define MYDRVNAME "visorchipset_parser" +#define CURRENT_FILE_PC VISOR_CHIPSET_PC_parser_c + +/* We will refuse to allocate more than this many bytes to copy data from + * incoming payloads. This serves as a throttling mechanism. + */ +#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128) +static ulong controlvm_payload_bytes_buffered; + +struct parser_context { + ulong allocbytes; + ulong param_bytes; + u8 *curr; + ulong bytes_remaining; + BOOL byte_stream; + char data[0]; +}; + +static struct parser_context * +parser_init_guts(u64 addr, u32 bytes, BOOL local, + BOOL standard_payload_header, BOOL *retry) +{ + int allocbytes = sizeof(struct parser_context) + bytes; + struct parser_context *rc = NULL; + struct parser_context *ctx = NULL; + struct memregion *rgn = NULL; + struct spar_controlvm_parameters_header *phdr = NULL; + + if (retry) + *retry = FALSE; + if (!standard_payload_header) + /* alloc and 0 extra byte to ensure payload is + * '\0'-terminated + */ + allocbytes++; + if ((controlvm_payload_bytes_buffered + bytes) + > MAX_CONTROLVM_PAYLOAD_BYTES) { + if (retry) + *retry = TRUE; + rc = NULL; + goto cleanup; + } + ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY); + if (!ctx) { + if (retry) + *retry = TRUE; + rc = NULL; + goto cleanup; + } + + ctx->allocbytes = allocbytes; + ctx->param_bytes = bytes; + ctx->curr = NULL; + ctx->bytes_remaining = 0; + ctx->byte_stream = FALSE; + if (local) { + void *p; + + if (addr > virt_to_phys(high_memory - 1)) { + rc = NULL; + goto cleanup; + } + p = __va((ulong) (addr)); + memcpy(ctx->data, p, bytes); + } else { + rgn = visor_memregion_create(addr, bytes); + if (!rgn) { + rc = NULL; + goto cleanup; + } + if (visor_memregion_read(rgn, 0, ctx->data, bytes) < 0) { + rc = NULL; + goto cleanup; + } + } + if (!standard_payload_header) { + ctx->byte_stream = TRUE; + rc = ctx; + goto cleanup; + } + phdr = (struct spar_controlvm_parameters_header *)(ctx->data); + if (phdr->total_length != bytes) { + rc = NULL; + goto cleanup; + } + if (phdr->total_length < phdr->header_length) { + rc = NULL; + goto cleanup; + } + if (phdr->header_length < + sizeof(struct spar_controlvm_parameters_header)) { + rc = NULL; + goto cleanup; + } + + rc = ctx; +cleanup: + if (rgn) { + visor_memregion_destroy(rgn); + rgn = NULL; + } + if (rc) { + controlvm_payload_bytes_buffered += ctx->param_bytes; + } else { + if (ctx) { + parser_done(ctx); + ctx = NULL; + } + } + return rc; +} + +struct parser_context * +parser_init(u64 addr, u32 bytes, BOOL local, BOOL *retry) +{ + return parser_init_guts(addr, bytes, local, TRUE, retry); +} + +/* Call this instead of parser_init() if the payload area consists of just + * a sequence of bytes, rather than a struct spar_controlvm_parameters_header + * structures. Afterwards, you can call parser_simpleString_get() or + * parser_byteStream_get() to obtain the data. 
+ */ +struct parser_context * +parser_init_byte_stream(u64 addr, u32 bytes, BOOL local, BOOL *retry) +{ + return parser_init_guts(addr, bytes, local, FALSE, retry); +} + +/* Obtain '\0'-terminated copy of string in payload area. + */ +char * +parser_simpleString_get(struct parser_context *ctx) +{ + if (!ctx->byte_stream) + return NULL; + return ctx->data; /* note this IS '\0'-terminated, because of + * the num of bytes we alloc+clear in + * parser_init_byteStream() */ +} + +/* Obtain a copy of the buffer in the payload area. + */ +void *parser_byte_stream_get(struct parser_context *ctx, ulong *nbytes) +{ + if (!ctx->byte_stream) + return NULL; + if (nbytes) + *nbytes = ctx->param_bytes; + return (void *)ctx->data; +} + +uuid_le +parser_id_get(struct parser_context *ctx) +{ + struct spar_controlvm_parameters_header *phdr = NULL; + + if (ctx == NULL) + return NULL_UUID_LE; + phdr = (struct spar_controlvm_parameters_header *)(ctx->data); + return phdr->id; +} + +void +parser_param_start(struct parser_context *ctx, PARSER_WHICH_STRING which_string) +{ + struct spar_controlvm_parameters_header *phdr = NULL; + + if (ctx == NULL) + goto Away; + phdr = (struct spar_controlvm_parameters_header *)(ctx->data); + switch (which_string) { + case PARSERSTRING_INITIATOR: + ctx->curr = ctx->data + phdr->initiator_offset; + ctx->bytes_remaining = phdr->initiator_length; + break; + case PARSERSTRING_TARGET: + ctx->curr = ctx->data + phdr->target_offset; + ctx->bytes_remaining = phdr->target_length; + break; + case PARSERSTRING_CONNECTION: + ctx->curr = ctx->data + phdr->connection_offset; + ctx->bytes_remaining = phdr->connection_length; + break; + case PARSERSTRING_NAME: + ctx->curr = ctx->data + phdr->name_offset; + ctx->bytes_remaining = phdr->name_length; + break; + default: + break; + } + +Away: + return; +} + +void +parser_done(struct parser_context *ctx) +{ + if (!ctx) + return; + controlvm_payload_bytes_buffered -= ctx->param_bytes; + kfree(ctx); +} + +/** Return length of string not counting trailing spaces. */ +static int +string_length_no_trail(char *s, int len) +{ + int i = len - 1; + + while (i >= 0) { + if (!isspace(s[i])) + return i + 1; + i--; + } + return 0; +} + +/** Grab the next name and value out of the parameter buffer. + * The entire parameter buffer looks like this: + * =\0 + * =\0 + * ... + * \0 + * If successful, the next value is returned within the supplied + * buffer (the value is always upper-cased), and the corresponding + * is returned within a kmalloc()ed buffer, whose pointer is + * provided as the return value of this function. + * (The total number of bytes allocated is strlen()+1.) + * + * NULL is returned to indicate failure, which can occur for several reasons: + * - all = pairs have already been processed + * - bad parameter + * - parameter buffer ends prematurely (couldn't find an '=' or '\0' within + * the confines of the parameter buffer) + * - the buffer is not large enough to hold the of the next + * parameter + */ +void * +parser_param_get(struct parser_context *ctx, char *nam, int namesize) +{ + u8 *pscan, *pnam = nam; + ulong nscan; + int value_length = -1, orig_value_length = -1; + void *value = NULL; + int i; + int closing_quote = 0; + + if (!ctx) + return NULL; + pscan = ctx->curr; + nscan = ctx->bytes_remaining; + if (nscan == 0) + return NULL; + if (*pscan == '\0') + /* This is the normal return point after you have processed + * all of the = pairs in a syntactically-valid + * parameter buffer. 
+ */ + return NULL; + + /* skip whitespace */ + while (isspace(*pscan)) { + pscan++; + nscan--; + if (nscan == 0) + return NULL; + } + + while (*pscan != ':') { + if (namesize <= 0) + return NULL; + *pnam = toupper(*pscan); + pnam++; + namesize--; + pscan++; + nscan--; + if (nscan == 0) + return NULL; + } + if (namesize <= 0) + return NULL; + *pnam = '\0'; + nam[string_length_no_trail(nam, strlen(nam))] = '\0'; + + /* point to char immediately after ":" in ":" */ + pscan++; + nscan--; + /* skip whitespace */ + while (isspace(*pscan)) { + pscan++; + nscan--; + if (nscan == 0) + return NULL; + } + if (nscan == 0) + return NULL; + if (*pscan == '\'' || *pscan == '"') { + closing_quote = *pscan; + pscan++; + nscan--; + if (nscan == 0) + return NULL; + } + + /* look for a separator character, terminator character, or + * end of data + */ + for (i = 0, value_length = -1; i < nscan; i++) { + if (closing_quote) { + if (pscan[i] == '\0') + return NULL; + if (pscan[i] == closing_quote) { + value_length = i; + break; + } + } else + if (pscan[i] == ',' || pscan[i] == ';' + || pscan[i] == '\0') { + value_length = i; + break; + } + } + if (value_length < 0) { + if (closing_quote) + return NULL; + value_length = nscan; + } + orig_value_length = value_length; + if (closing_quote == 0) + value_length = string_length_no_trail(pscan, orig_value_length); + value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY); + if (value == NULL) + return NULL; + memcpy(value, pscan, value_length); + ((u8 *) (value))[value_length] = '\0'; + + pscan += orig_value_length; + nscan -= orig_value_length; + + /* skip past separator or closing quote */ + if (nscan > 0) { + if (*pscan != '\0') { + pscan++; + nscan--; + } + } + + if (closing_quote && (nscan > 0)) { + /* we still need to skip around the real separator if present */ + /* first, skip whitespace */ + while (isspace(*pscan)) { + pscan++; + nscan--; + if (nscan == 0) + break; + } + if (nscan > 0) { + if (*pscan == ',' || *pscan == ';') { + pscan++; + nscan--; + } else if (*pscan != '\0') { + kfree(value); + value = NULL; + return NULL; + } + } + } + ctx->curr = pscan; + ctx->bytes_remaining = nscan; + return value; +} + +void * +parser_string_get(struct parser_context *ctx) +{ + u8 *pscan; + ulong nscan; + int value_length = -1; + void *value = NULL; + int i; + + if (!ctx) + return NULL; + pscan = ctx->curr; + nscan = ctx->bytes_remaining; + if (nscan == 0) + return NULL; + if (!pscan) + return NULL; + for (i = 0, value_length = -1; i < nscan; i++) + if (pscan[i] == '\0') { + value_length = i; + break; + } + if (value_length < 0) /* '\0' was not included in the length */ + value_length = nscan; + value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY); + if (value == NULL) + return NULL; + if (value_length > 0) + memcpy(value, pscan, value_length); + ((u8 *) (value))[value_length] = '\0'; + return value; +} diff --git a/kernel/drivers/staging/unisys/visorchipset/parser.h b/kernel/drivers/staging/unisys/visorchipset/parser.h new file mode 100644 index 000000000..2b903f1be --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/parser.h @@ -0,0 +1,46 @@ +/* parser.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __PARSER_H__ +#define __PARSER_H__ + +#include + +#include "timskmod.h" +#include "channel.h" + +typedef enum { + PARSERSTRING_INITIATOR, + PARSERSTRING_TARGET, + PARSERSTRING_CONNECTION, + PARSERSTRING_NAME, +} PARSER_WHICH_STRING; + +struct parser_context *parser_init(u64 addr, u32 bytes, BOOL isLocal, + BOOL *tryAgain); +struct parser_context *parser_init_byte_stream(u64 addr, u32 bytes, BOOL local, + BOOL *retry); +void parser_param_start(struct parser_context *ctx, + PARSER_WHICH_STRING which_string); +void *parser_param_get(struct parser_context *ctx, char *nam, int namesize); +void *parser_string_get(struct parser_context *ctx); +uuid_le parser_id_get(struct parser_context *ctx); +char *parser_simpleString_get(struct parser_context *ctx); +void *parser_byte_stream_get(struct parser_context *ctx, ulong *nbytes); +void parser_done(struct parser_context *ctx); + +#endif diff --git a/kernel/drivers/staging/unisys/visorchipset/visorchipset.h b/kernel/drivers/staging/unisys/visorchipset/visorchipset.h new file mode 100644 index 000000000..bd46df9ef --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/visorchipset.h @@ -0,0 +1,236 @@ +/* visorchipset.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __VISORCHIPSET_H__ +#define __VISORCHIPSET_H__ + +#include + +#include "timskmod.h" +#include "channel.h" +#include "controlvmchannel.h" +#include "parser.h" +#include "procobjecttree.h" +#include "vbusdeviceinfo.h" +#include "vbushelper.h" + +/** Describes the state from the perspective of which controlvm messages have + * been received for a bus or device. + */ +struct visorchipset_state { + u32 created:1; + u32 attached:1; + u32 configured:1; + u32 running:1; + /* Add new fields above. */ + /* Remaining bits in this 32-bit word are unused. */ +}; + +enum visorchipset_addresstype { + /** address is guest physical, but outside of the physical memory + * region that is controlled by the running OS (this is the normal + * address type for Supervisor channels) + */ + ADDRTYPE_LOCALPHYSICAL, + + /** address is guest physical, and withIN the confines of the + * physical memory controlled by the running OS. + */ + ADDRTYPE_LOCALTEST, +}; + +enum crash_obj_type { + CRASH_DEV, + CRASH_BUS, +}; + +/** Attributes for a particular Supervisor channel. + */ +struct visorchipset_channel_info { + enum visorchipset_addresstype addr_type; + HOSTADDRESS channel_addr; + struct irq_info intr; + u64 n_channel_bytes; + uuid_le channel_type_uuid; + uuid_le channel_inst_uuid; + +}; + +/** Attributes for a particular Supervisor device. 
+ * Any visorchipset client can query these attributes using + * visorchipset_get_client_device_info() or + * visorchipset_get_server_device_info(). + */ +struct visorchipset_device_info { + struct list_head entry; + u32 bus_no; + u32 dev_no; + uuid_le dev_inst_uuid; + struct visorchipset_state state; + struct visorchipset_channel_info chan_info; + u32 reserved1; /* control_vm_id */ + u64 reserved2; + u32 switch_no; /* when devState.attached==1 */ + u32 internal_port_no; /* when devState.attached==1 */ + struct controlvm_message_header pending_msg_hdr;/* CONTROLVM_MESSAGE */ + /** For private use by the bus driver */ + void *bus_driver_context; + +}; + +static inline struct visorchipset_device_info *finddevice( + struct list_head *list, u32 bus_no, u32 dev_no) +{ + struct visorchipset_device_info *p; + + list_for_each_entry(p, list, entry) { + if (p->bus_no == bus_no && p->dev_no == dev_no) + return p; + } + return NULL; +} + +static inline void delbusdevices(struct list_head *list, u32 bus_no) +{ + struct visorchipset_device_info *p, *tmp; + + list_for_each_entry_safe(p, tmp, list, entry) { + if (p->bus_no == bus_no) { + list_del(&p->entry); + kfree(p); + } + } +} + +/** Attributes for a particular Supervisor bus. + * (For a service partition acting as the server for buses/devices, there + * is a 1-to-1 relationship between busses and guest partitions.) + * Any visorchipset client can query these attributes using + * visorchipset_get_client_bus_info() or visorchipset_get_bus_info(). + */ +struct visorchipset_bus_info { + struct list_head entry; + u32 bus_no; + struct visorchipset_state state; + struct visorchipset_channel_info chan_info; + uuid_le partition_uuid; + u64 partition_handle; + u8 *name; /* UTF8 */ + u8 *description; /* UTF8 */ + u64 reserved1; + u32 reserved2; + struct { + u32 server:1; + /* Add new fields above. */ + /* Remaining bits in this 32-bit word are unused. */ + } flags; + struct controlvm_message_header pending_msg_hdr;/* CONTROLVM MsgHdr */ + /** For private use by the bus driver */ + void *bus_driver_context; + u64 dev_no; + +}; + +static inline struct visorchipset_bus_info * +findbus(struct list_head *list, u32 bus_no) +{ + struct visorchipset_bus_info *p; + + list_for_each_entry(p, list, entry) { + if (p->bus_no == bus_no) + return p; + } + return NULL; +} + +/* These functions will be called from within visorchipset when certain + * events happen. (The implementation of these functions is outside of + * visorchipset.) + */ +struct visorchipset_busdev_notifiers { + void (*bus_create)(ulong bus_no); + void (*bus_destroy)(ulong bus_no); + void (*device_create)(ulong bus_no, ulong dev_no); + void (*device_destroy)(ulong bus_no, ulong dev_no); + void (*device_pause)(ulong bus_no, ulong dev_no); + void (*device_resume)(ulong bus_no, ulong dev_no); + int (*get_channel_info)(uuid_le type_uuid, ulong *min_size, + ulong *max_size); +}; + +/* These functions live inside visorchipset, and will be called to indicate + * responses to specific events (by code outside of visorchipset). 
+ * For now, the value for each response is simply either: + * 0 = it worked + * -1 = it failed + */ +struct visorchipset_busdev_responders { + void (*bus_create)(ulong bus_no, int response); + void (*bus_destroy)(ulong bus_no, int response); + void (*device_create)(ulong bus_no, ulong dev_no, int response); + void (*device_destroy)(ulong bus_no, ulong dev_no, int response); + void (*device_pause)(ulong bus_no, ulong dev_no, int response); + void (*device_resume)(ulong bus_no, ulong dev_no, int response); +}; + +/** Register functions (in the bus driver) to get called by visorchipset + * whenever a bus or device appears for which this service partition is + * to be the server for. visorchipset will fill in , to + * indicate functions the bus driver should call to indicate message + * responses. + */ +void +visorchipset_register_busdev_client( + struct visorchipset_busdev_notifiers *notifiers, + struct visorchipset_busdev_responders *responders, + struct ultra_vbus_deviceinfo *driver_info); + +/** Register functions (in the bus driver) to get called by visorchipset + * whenever a bus or device appears for which this service partition is + * to be the client for. visorchipset will fill in , to + * indicate functions the bus driver should call to indicate message + * responses. + */ +void +visorchipset_register_busdev_server( + struct visorchipset_busdev_notifiers *notifiers, + struct visorchipset_busdev_responders *responders, + struct ultra_vbus_deviceinfo *driver_info); + +typedef void (*SPARREPORTEVENT_COMPLETE_FUNC) (struct controlvm_message *msg, + int status); + +void visorchipset_device_pause_response(ulong bus_no, ulong dev_no, + int response); + +BOOL visorchipset_get_bus_info(ulong bus_no, + struct visorchipset_bus_info *bus_info); +BOOL visorchipset_get_device_info(ulong bus_no, ulong dev_no, + struct visorchipset_device_info *dev_info); +BOOL visorchipset_set_bus_context(ulong bus_no, void *context); +BOOL visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context); +int visorchipset_chipset_ready(void); +int visorchipset_chipset_selftest(void); +int visorchipset_chipset_notready(void); +void visorchipset_save_message(struct controlvm_message *msg, + enum crash_obj_type type); +void *visorchipset_cache_alloc(struct kmem_cache *pool, + BOOL ok_to_block, char *fn, int ln); +void visorchipset_cache_free(struct kmem_cache *pool, void *p, + char *fn, int ln); + +#endif diff --git a/kernel/drivers/staging/unisys/visorchipset/visorchipset_main.c b/kernel/drivers/staging/unisys/visorchipset/visorchipset_main.c new file mode 100644 index 000000000..f2663d2c7 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/visorchipset_main.c @@ -0,0 +1,2335 @@ +/* visorchipset_main.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ */ + +#include "globals.h" +#include "visorchipset.h" +#include "procobjecttree.h" +#include "visorchannel.h" +#include "periodic_work.h" +#include "file.h" +#include "parser.h" +#include "uisutils.h" +#include "controlvmcompletionstatus.h" +#include "guestlinuxdebug.h" + +#include +#include +#include +#include + +#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c +#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for + * vnic loopback test */ +#define TEST_VNIC_SWITCHNO 1 +#define TEST_VNIC_BUSNO 9 + +#define MAX_NAME_SIZE 128 +#define MAX_IP_SIZE 50 +#define MAXOUTSTANDINGCHANNELCOMMAND 256 +#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1 +#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100 + +/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS, +* we switch to slow polling mode. As soon as we get a controlvm +* message, we switch back to fast polling mode. +*/ +#define MIN_IDLE_SECONDS 10 +static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST; +static ulong most_recent_message_jiffies; /* when we got our last + * controlvm message */ +static inline char * +NONULLSTR(char *s) +{ + if (s) + return s; + return ""; +} + +static int serverregistered; +static int clientregistered; + +#define MAX_CHIPSET_EVENTS 2 +static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 }; + +static struct delayed_work periodic_controlvm_work; +static struct workqueue_struct *periodic_controlvm_workqueue; +static DEFINE_SEMAPHORE(notifier_lock); + +static struct controlvm_message_header g_diag_msg_hdr; +static struct controlvm_message_header g_chipset_msg_hdr; +static struct controlvm_message_header g_del_dump_msg_hdr; +static const uuid_le spar_diag_pool_channel_protocol_uuid = + SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID; +/* 0xffffff is an invalid Bus/Device number */ +static ulong g_diagpool_bus_no = 0xffffff; +static ulong g_diagpool_dev_no = 0xffffff; +static struct controlvm_message_packet g_devicechangestate_packet; + +/* Only VNIC and VHBA channels are sent to visorclientbus (aka + * "visorhackbus") + */ +#define FOR_VISORHACKBUS(channel_type_guid) \ + (((uuid_le_cmp(channel_type_guid,\ + spar_vnic_channel_protocol_uuid) == 0) ||\ + (uuid_le_cmp(channel_type_guid,\ + spar_vhba_channel_protocol_uuid) == 0))) +#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid))) + +#define is_diagpool_channel(channel_type_guid) \ + (uuid_le_cmp(channel_type_guid,\ + spar_diag_pool_channel_protocol_uuid) == 0) + +static LIST_HEAD(bus_info_list); +static LIST_HEAD(dev_info_list); + +static struct visorchannel *controlvm_channel; + +/* Manages the request payload in the controlvm channel */ +static struct controlvm_payload_info { + u8 __iomem *ptr; /* pointer to base address of payload pool */ + u64 offset; /* offset from beginning of controlvm + * channel to beginning of payload * pool */ + u32 bytes; /* number of bytes in payload pool */ +} controlvm_payload_info; + +/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE / + * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation. + */ +static struct livedump_info { + struct controlvm_message_header dumpcapture_header; + struct controlvm_message_header gettextdump_header; + struct controlvm_message_header dumpcomplete_header; + BOOL gettextdump_outstanding; + u32 crc32; + ulong length; + atomic_t buffers_in_use; + ulong destination; +} livedump_info; + +/* The following globals are used to handle the scenario where we are unable to + * offload the payload from a controlvm message due to memory requirements. 
In + * this scenario, we simply stash the controlvm message, then attempt to + * process it again the next time controlvm_periodic_work() runs. + */ +static struct controlvm_message controlvm_pending_msg; +static BOOL controlvm_pending_msg_valid = FALSE; + +/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming) + * TRANSMIT_FILE PutFile payloads. + */ +static struct kmem_cache *putfile_buffer_list_pool; +static const char putfile_buffer_list_pool_name[] = + "controlvm_putfile_buffer_list_pool"; + +/* This identifies a data buffer that has been received via a controlvm messages + * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation. + */ +struct putfile_buffer_entry { + struct list_head next; /* putfile_buffer_entry list */ + struct parser_context *parser_ctx; /* points to input data buffer */ +}; + +/* List of struct putfile_request *, via next_putfile_request member. + * Each entry in this list identifies an outstanding TRANSMIT_FILE + * conversation. + */ +static LIST_HEAD(putfile_request_list); + +/* This describes a buffer and its current state of transfer (e.g., how many + * bytes have already been supplied as putfile data, and how many bytes are + * remaining) for a putfile_request. + */ +struct putfile_active_buffer { + /* a payload from a controlvm message, containing a file data buffer */ + struct parser_context *parser_ctx; + /* points within data area of parser_ctx to next byte of data */ + u8 *pnext; + /* # bytes left from to the end of this data buffer */ + size_t bytes_remaining; +}; + +#define PUTFILE_REQUEST_SIG 0x0906101302281211 +/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE + * conversation. Structs of this type are dynamically linked into + * . + */ +struct putfile_request { + u64 sig; /* PUTFILE_REQUEST_SIG */ + + /* header from original TransmitFile request */ + struct controlvm_message_header controlvm_header; + u64 file_request_number; /* from original TransmitFile request */ + + /* link to next struct putfile_request */ + struct list_head next_putfile_request; + + /* most-recent sequence number supplied via a controlvm message */ + u64 data_sequence_number; + + /* head of putfile_buffer_entry list, which describes the data to be + * supplied as putfile data; + * - this list is added to when controlvm messages come in that supply + * file data + * - this list is removed from via the hotplug program that is actually + * consuming these buffers to write as file data */ + struct list_head input_buffer_list; + spinlock_t req_list_lock; /* lock for input_buffer_list */ + + /* waiters for input_buffer_list to go non-empty */ + wait_queue_head_t input_buffer_wq; + + /* data not yet read within current putfile_buffer_entry */ + struct putfile_active_buffer active_buf; + + /* <0 = failed, 0 = in-progress, >0 = successful; */ + /* note that this must be set with req_list_lock, and if you set <0, */ + /* it is your responsibility to also free up all of the other objects */ + /* in this struct (like input_buffer_list, active_buf.parser_ctx) */ + /* before releasing the lock */ + int completion_status; +}; + +static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0); + +struct parahotplug_request { + struct list_head list; + int id; + unsigned long expiration; + struct controlvm_message msg; +}; + +static LIST_HEAD(parahotplug_request_list); +static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */ +static void parahotplug_process_list(void); + +/* Manages the info for a 
CONTROLVM_DUMP_CAPTURESTATE / + * CONTROLVM_REPORTEVENT. + */ +static struct visorchipset_busdev_notifiers busdev_server_notifiers; +static struct visorchipset_busdev_notifiers busdev_client_notifiers; + +static void bus_create_response(ulong bus_no, int response); +static void bus_destroy_response(ulong bus_no, int response); +static void device_create_response(ulong bus_no, ulong dev_no, int response); +static void device_destroy_response(ulong bus_no, ulong dev_no, int response); +static void device_resume_response(ulong bus_no, ulong dev_no, int response); + +static struct visorchipset_busdev_responders busdev_responders = { + .bus_create = bus_create_response, + .bus_destroy = bus_destroy_response, + .device_create = device_create_response, + .device_destroy = device_destroy_response, + .device_pause = visorchipset_device_pause_response, + .device_resume = device_resume_response, +}; + +/* info for /dev/visorchipset */ +static dev_t major_dev = -1; /**< indicates major num for device */ + +/* prototypes for attributes */ +static ssize_t toolaction_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t toolaction_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static DEVICE_ATTR_RW(toolaction); + +static ssize_t boottotool_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t boottotool_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count); +static DEVICE_ATTR_RW(boottotool); + +static ssize_t error_show(struct device *dev, struct device_attribute *attr, + char *buf); +static ssize_t error_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +static DEVICE_ATTR_RW(error); + +static ssize_t textid_show(struct device *dev, struct device_attribute *attr, + char *buf); +static ssize_t textid_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +static DEVICE_ATTR_RW(textid); + +static ssize_t remaining_steps_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t remaining_steps_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static DEVICE_ATTR_RW(remaining_steps); + +static ssize_t chipsetready_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static DEVICE_ATTR_WO(chipsetready); + +static ssize_t devicedisabled_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static DEVICE_ATTR_WO(devicedisabled); + +static ssize_t deviceenabled_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static DEVICE_ATTR_WO(deviceenabled); + +static struct attribute *visorchipset_install_attrs[] = { + &dev_attr_toolaction.attr, + &dev_attr_boottotool.attr, + &dev_attr_error.attr, + &dev_attr_textid.attr, + &dev_attr_remaining_steps.attr, + NULL +}; + +static struct attribute_group visorchipset_install_group = { + .name = "install", + .attrs = visorchipset_install_attrs +}; + +static struct attribute *visorchipset_guest_attrs[] = { + &dev_attr_chipsetready.attr, + NULL +}; + +static struct attribute_group visorchipset_guest_group = { + .name = "guest", + .attrs = visorchipset_guest_attrs +}; + +static struct attribute *visorchipset_parahotplug_attrs[] = { + &dev_attr_devicedisabled.attr, + &dev_attr_deviceenabled.attr, + NULL +}; + +static struct attribute_group 
visorchipset_parahotplug_group = { + .name = "parahotplug", + .attrs = visorchipset_parahotplug_attrs +}; + +static const struct attribute_group *visorchipset_dev_groups[] = { + &visorchipset_install_group, + &visorchipset_guest_group, + &visorchipset_parahotplug_group, + NULL +}; + +/* /sys/devices/platform/visorchipset */ +static struct platform_device visorchipset_platform_device = { + .name = "visorchipset", + .id = -1, + .dev.groups = visorchipset_dev_groups, +}; + +/* Function prototypes */ +static void controlvm_respond(struct controlvm_message_header *msg_hdr, + int response); +static void controlvm_respond_chipset_init( + struct controlvm_message_header *msg_hdr, int response, + enum ultra_chipset_feature features); +static void controlvm_respond_physdev_changestate( + struct controlvm_message_header *msg_hdr, int response, + struct spar_segment_state state); + +static ssize_t toolaction_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 tool_action; + + visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + tool_action), &tool_action, sizeof(u8)); + return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action); +} + +static ssize_t toolaction_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u8 tool_action; + int ret; + + if (kstrtou8(buf, 10, &tool_action) != 0) + return -EINVAL; + + ret = visorchannel_write(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + tool_action), + &tool_action, sizeof(u8)); + + if (ret) + return ret; + return count; +} + +static ssize_t boottotool_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efi_spar_indication efi_spar_indication; + + visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + efi_spar_ind), &efi_spar_indication, + sizeof(struct efi_spar_indication)); + return scnprintf(buf, PAGE_SIZE, "%u\n", + efi_spar_indication.boot_to_tool); +} + +static ssize_t boottotool_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val, ret; + struct efi_spar_indication efi_spar_indication; + + if (kstrtoint(buf, 10, &val) != 0) + return -EINVAL; + + efi_spar_indication.boot_to_tool = val; + ret = visorchannel_write(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + efi_spar_ind), &(efi_spar_indication), + sizeof(struct efi_spar_indication)); + + if (ret) + return ret; + return count; +} + +static ssize_t error_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u32 error; + + visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + installation_error), + &error, sizeof(u32)); + return scnprintf(buf, PAGE_SIZE, "%i\n", error); +} + +static ssize_t error_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 error; + int ret; + + if (kstrtou32(buf, 10, &error) != 0) + return -EINVAL; + + ret = visorchannel_write(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + installation_error), + &error, sizeof(u32)); + if (ret) + return ret; + return count; +} + +static ssize_t textid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u32 text_id; + + visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + installation_text_id), + &text_id, sizeof(u32)); + return scnprintf(buf, PAGE_SIZE, "%i\n", text_id); +} + +static ssize_t 
textid_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 text_id; + int ret; + + if (kstrtou32(buf, 10, &text_id) != 0) + return -EINVAL; + + ret = visorchannel_write(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + installation_text_id), + &text_id, sizeof(u32)); + if (ret) + return ret; + return count; +} + +static ssize_t remaining_steps_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u16 remaining_steps; + + visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + installation_remaining_steps), + &remaining_steps, sizeof(u16)); + return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps); +} + +static ssize_t remaining_steps_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u16 remaining_steps; + int ret; + + if (kstrtou16(buf, 10, &remaining_steps) != 0) + return -EINVAL; + + ret = visorchannel_write(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + installation_remaining_steps), + &remaining_steps, sizeof(u16)); + if (ret) + return ret; + return count; +} + +static void +bus_info_clear(void *v) +{ + struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v); + + kfree(p->name); + p->name = NULL; + + kfree(p->description); + p->description = NULL; + + p->state.created = 0; + memset(p, 0, sizeof(struct visorchipset_bus_info)); +} + +static void +dev_info_clear(void *v) +{ + struct visorchipset_device_info *p = + (struct visorchipset_device_info *)(v); + + p->state.created = 0; + memset(p, 0, sizeof(struct visorchipset_device_info)); +} + +static u8 +check_chipset_events(void) +{ + int i; + u8 send_msg = 1; + /* Check events to determine if response should be sent */ + for (i = 0; i < MAX_CHIPSET_EVENTS; i++) + send_msg &= chipset_events[i]; + return send_msg; +} + +static void +clear_chipset_events(void) +{ + int i; + /* Clear chipset_events */ + for (i = 0; i < MAX_CHIPSET_EVENTS; i++) + chipset_events[i] = 0; +} + +void +visorchipset_register_busdev_server( + struct visorchipset_busdev_notifiers *notifiers, + struct visorchipset_busdev_responders *responders, + struct ultra_vbus_deviceinfo *driver_info) +{ + down(¬ifier_lock); + if (!notifiers) { + memset(&busdev_server_notifiers, 0, + sizeof(busdev_server_notifiers)); + serverregistered = 0; /* clear flag */ + } else { + busdev_server_notifiers = *notifiers; + serverregistered = 1; /* set flag */ + } + if (responders) + *responders = busdev_responders; + if (driver_info) + bus_device_info_init(driver_info, "chipset", "visorchipset", + VERSION, NULL); + + up(¬ifier_lock); +} +EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server); + +void +visorchipset_register_busdev_client( + struct visorchipset_busdev_notifiers *notifiers, + struct visorchipset_busdev_responders *responders, + struct ultra_vbus_deviceinfo *driver_info) +{ + down(¬ifier_lock); + if (!notifiers) { + memset(&busdev_client_notifiers, 0, + sizeof(busdev_client_notifiers)); + clientregistered = 0; /* clear flag */ + } else { + busdev_client_notifiers = *notifiers; + clientregistered = 1; /* set flag */ + } + if (responders) + *responders = busdev_responders; + if (driver_info) + bus_device_info_init(driver_info, "chipset(bolts)", + "visorchipset", VERSION, NULL); + up(¬ifier_lock); +} +EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client); + +static void +cleanup_controlvm_structures(void) +{ + struct visorchipset_bus_info *bi, *tmp_bi; + struct 
visorchipset_device_info *di, *tmp_di; + + list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) { + bus_info_clear(bi); + list_del(&bi->entry); + kfree(bi); + } + + list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) { + dev_info_clear(di); + list_del(&di->entry); + kfree(di); + } +} + +static void +chipset_init(struct controlvm_message *inmsg) +{ + static int chipset_inited; + enum ultra_chipset_feature features = 0; + int rc = CONTROLVM_RESP_SUCCESS; + + POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO); + if (chipset_inited) { + rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + goto cleanup; + } + chipset_inited = 1; + POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO); + + /* Set features to indicate we support parahotplug (if Command + * also supports it). */ + features = + inmsg->cmd.init_chipset. + features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG; + + /* Set the "reply" bit so Command knows this is a + * features-aware driver. */ + features |= ULTRA_CHIPSET_FEATURE_REPLY; + +cleanup: + if (rc < 0) + cleanup_controlvm_structures(); + if (inmsg->hdr.flags.response_expected) + controlvm_respond_chipset_init(&inmsg->hdr, rc, features); +} + +static void +controlvm_init_response(struct controlvm_message *msg, + struct controlvm_message_header *msg_hdr, int response) +{ + memset(msg, 0, sizeof(struct controlvm_message)); + memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header)); + msg->hdr.payload_bytes = 0; + msg->hdr.payload_vm_offset = 0; + msg->hdr.payload_max_bytes = 0; + if (response < 0) { + msg->hdr.flags.failed = 1; + msg->hdr.completion_status = (u32) (-response); + } +} + +static void +controlvm_respond(struct controlvm_message_header *msg_hdr, int response) +{ + struct controlvm_message outmsg; + + controlvm_init_response(&outmsg, msg_hdr, response); + /* For DiagPool channel DEVICE_CHANGESTATE, we need to send + * back the deviceChangeState structure in the packet. 
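+ * (handle_command() saves that structure in g_devicechangestate_packet when
+ * the original DEVICE_CHANGESTATE message is processed.)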
*/ + if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE && + g_devicechangestate_packet.device_change_state.bus_no == + g_diagpool_bus_no && + g_devicechangestate_packet.device_change_state.dev_no == + g_diagpool_dev_no) + outmsg.cmd = g_devicechangestate_packet; + if (outmsg.hdr.flags.test_message == 1) + return; + + if (!visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg)) { + return; + } +} + +static void +controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr, + int response, + enum ultra_chipset_feature features) +{ + struct controlvm_message outmsg; + + controlvm_init_response(&outmsg, msg_hdr, response); + outmsg.cmd.init_chipset.features = features; + if (!visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg)) { + return; + } +} + +static void controlvm_respond_physdev_changestate( + struct controlvm_message_header *msg_hdr, int response, + struct spar_segment_state state) +{ + struct controlvm_message outmsg; + + controlvm_init_response(&outmsg, msg_hdr, response); + outmsg.cmd.device_change_state.state = state; + outmsg.cmd.device_change_state.flags.phys_device = 1; + if (!visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg)) { + return; + } +} + +void +visorchipset_save_message(struct controlvm_message *msg, + enum crash_obj_type type) +{ + u32 crash_msg_offset; + u16 crash_msg_count; + + /* get saved message count */ + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_count), + &crash_msg_count, sizeof(u16)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) { + POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, + crash_msg_count, + POSTCODE_SEVERITY_ERR); + return; + } + + /* get saved crash message offset */ + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_offset), + &crash_msg_offset, sizeof(u32)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + if (type == CRASH_BUS) { + if (visorchannel_write(controlvm_channel, + crash_msg_offset, + msg, + sizeof(struct controlvm_message)) < 0) { + POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + } else { + if (visorchannel_write(controlvm_channel, + crash_msg_offset + + sizeof(struct controlvm_message), msg, + sizeof(struct controlvm_message)) < 0) { + POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + } +} +EXPORT_SYMBOL_GPL(visorchipset_save_message); + +static void +bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response) +{ + struct visorchipset_bus_info *p = NULL; + BOOL need_clear = FALSE; + + p = findbus(&bus_info_list, bus_no); + if (!p) + return; + + if (response < 0) { + if ((cmd_id == CONTROLVM_BUS_CREATE) && + (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE))) + /* undo the row we just created... 
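+ * (delbusdevices() below removes that bus's devices from dev_info_list)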
*/ + delbusdevices(&dev_info_list, bus_no); + } else { + if (cmd_id == CONTROLVM_BUS_CREATE) + p->state.created = 1; + if (cmd_id == CONTROLVM_BUS_DESTROY) + need_clear = TRUE; + } + + if (p->pending_msg_hdr.id == CONTROLVM_INVALID) + return; /* no controlvm response needed */ + if (p->pending_msg_hdr.id != (u32)cmd_id) + return; + controlvm_respond(&p->pending_msg_hdr, response); + p->pending_msg_hdr.id = CONTROLVM_INVALID; + if (need_clear) { + bus_info_clear(p); + delbusdevices(&dev_info_list, bus_no); + } +} + +static void +device_changestate_responder(enum controlvm_id cmd_id, + ulong bus_no, ulong dev_no, int response, + struct spar_segment_state response_state) +{ + struct visorchipset_device_info *p = NULL; + struct controlvm_message outmsg; + + p = finddevice(&dev_info_list, bus_no, dev_no); + if (!p) + return; + if (p->pending_msg_hdr.id == CONTROLVM_INVALID) + return; /* no controlvm response needed */ + if (p->pending_msg_hdr.id != cmd_id) + return; + + controlvm_init_response(&outmsg, &p->pending_msg_hdr, response); + + outmsg.cmd.device_change_state.bus_no = bus_no; + outmsg.cmd.device_change_state.dev_no = dev_no; + outmsg.cmd.device_change_state.state = response_state; + + if (!visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_REQUEST, &outmsg)) + return; + + p->pending_msg_hdr.id = CONTROLVM_INVALID; +} + +static void +device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no, + int response) +{ + struct visorchipset_device_info *p = NULL; + BOOL need_clear = FALSE; + + p = finddevice(&dev_info_list, bus_no, dev_no); + if (!p) + return; + if (response >= 0) { + if (cmd_id == CONTROLVM_DEVICE_CREATE) + p->state.created = 1; + if (cmd_id == CONTROLVM_DEVICE_DESTROY) + need_clear = TRUE; + } + + if (p->pending_msg_hdr.id == CONTROLVM_INVALID) + return; /* no controlvm response needed */ + + if (p->pending_msg_hdr.id != (u32)cmd_id) + return; + + controlvm_respond(&p->pending_msg_hdr, response); + p->pending_msg_hdr.id = CONTROLVM_INVALID; + if (need_clear) + dev_info_clear(p); +} + +static void +bus_epilog(u32 bus_no, + u32 cmd, struct controlvm_message_header *msg_hdr, + int response, BOOL need_response) +{ + BOOL notified = FALSE; + + struct visorchipset_bus_info *bus_info = findbus(&bus_info_list, + bus_no); + + if (!bus_info) + return; + + if (need_response) { + memcpy(&bus_info->pending_msg_hdr, msg_hdr, + sizeof(struct controlvm_message_header)); + } else { + bus_info->pending_msg_hdr.id = CONTROLVM_INVALID; + } + + down(¬ifier_lock); + if (response == CONTROLVM_RESP_SUCCESS) { + switch (cmd) { + case CONTROLVM_BUS_CREATE: + /* We can't tell from the bus_create + * information which of our 2 bus flavors the + * devices on this bus will ultimately end up. + * FORTUNATELY, it turns out it is harmless to + * send the bus_create to both of them. 
We can + * narrow things down a little bit, though, + * because we know: - BusDev_Server can handle + * either server or client devices + * - BusDev_Client can handle ONLY client + * devices */ + if (busdev_server_notifiers.bus_create) { + (*busdev_server_notifiers.bus_create) (bus_no); + notified = TRUE; + } + if ((!bus_info->flags.server) /*client */ && + busdev_client_notifiers.bus_create) { + (*busdev_client_notifiers.bus_create) (bus_no); + notified = TRUE; + } + break; + case CONTROLVM_BUS_DESTROY: + if (busdev_server_notifiers.bus_destroy) { + (*busdev_server_notifiers.bus_destroy) (bus_no); + notified = TRUE; + } + if ((!bus_info->flags.server) /*client */ && + busdev_client_notifiers.bus_destroy) { + (*busdev_client_notifiers.bus_destroy) (bus_no); + notified = TRUE; + } + break; + } + } + if (notified) + /* The callback function just called above is responsible + * for calling the appropriate visorchipset_busdev_responders + * function, which will call bus_responder() + */ + ; + else + bus_responder(cmd, bus_no, response); + up(¬ifier_lock); +} + +static void +device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd, + struct controlvm_message_header *msg_hdr, int response, + BOOL need_response, BOOL for_visorbus) +{ + struct visorchipset_busdev_notifiers *notifiers = NULL; + BOOL notified = FALSE; + + struct visorchipset_device_info *dev_info = + finddevice(&dev_info_list, bus_no, dev_no); + char *envp[] = { + "SPARSP_DIAGPOOL_PAUSED_STATE = 1", + NULL + }; + + if (!dev_info) + return; + + if (for_visorbus) + notifiers = &busdev_server_notifiers; + else + notifiers = &busdev_client_notifiers; + if (need_response) { + memcpy(&dev_info->pending_msg_hdr, msg_hdr, + sizeof(struct controlvm_message_header)); + } else { + dev_info->pending_msg_hdr.id = CONTROLVM_INVALID; + } + + down(¬ifier_lock); + if (response >= 0) { + switch (cmd) { + case CONTROLVM_DEVICE_CREATE: + if (notifiers->device_create) { + (*notifiers->device_create) (bus_no, dev_no); + notified = TRUE; + } + break; + case CONTROLVM_DEVICE_CHANGESTATE: + /* ServerReady / ServerRunning / SegmentStateRunning */ + if (state.alive == segment_state_running.alive && + state.operating == + segment_state_running.operating) { + if (notifiers->device_resume) { + (*notifiers->device_resume) (bus_no, + dev_no); + notified = TRUE; + } + } + /* ServerNotReady / ServerLost / SegmentStateStandby */ + else if (state.alive == segment_state_standby.alive && + state.operating == + segment_state_standby.operating) { + /* technically this is standby case + * where server is lost + */ + if (notifiers->device_pause) { + (*notifiers->device_pause) (bus_no, + dev_no); + notified = TRUE; + } + } else if (state.alive == segment_state_paused.alive && + state.operating == + segment_state_paused.operating) { + /* this is lite pause where channel is + * still valid just 'pause' of it + */ + if (bus_no == g_diagpool_bus_no && + dev_no == g_diagpool_dev_no) { + /* this will trigger the + * diag_shutdown.sh script in + * the visorchipset hotplug */ + kobject_uevent_env + (&visorchipset_platform_device.dev. 
+ kobj, KOBJ_ONLINE, envp); + } + } + break; + case CONTROLVM_DEVICE_DESTROY: + if (notifiers->device_destroy) { + (*notifiers->device_destroy) (bus_no, dev_no); + notified = TRUE; + } + break; + } + } + if (notified) + /* The callback function just called above is responsible + * for calling the appropriate visorchipset_busdev_responders + * function, which will call device_responder() + */ + ; + else + device_responder(cmd, bus_no, dev_no, response); + up(¬ifier_lock); +} + +static void +bus_create(struct controlvm_message *inmsg) +{ + struct controlvm_message_packet *cmd = &inmsg->cmd; + ulong bus_no = cmd->create_bus.bus_no; + int rc = CONTROLVM_RESP_SUCCESS; + struct visorchipset_bus_info *bus_info = NULL; + + bus_info = findbus(&bus_info_list, bus_no); + if (bus_info && (bus_info->state.created == 1)) { + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + goto cleanup; + } + bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL); + if (!bus_info) { + POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; + goto cleanup; + } + + INIT_LIST_HEAD(&bus_info->entry); + bus_info->bus_no = bus_no; + bus_info->dev_no = cmd->create_bus.dev_count; + + POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO); + + if (inmsg->hdr.flags.test_message == 1) + bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST; + else + bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL; + + bus_info->flags.server = inmsg->hdr.flags.server; + bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr; + bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes; + bus_info->chan_info.channel_type_uuid = + cmd->create_bus.bus_data_type_uuid; + bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid; + + list_add(&bus_info->entry, &bus_info_list); + + POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); + +cleanup: + bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr, + rc, inmsg->hdr.flags.response_expected == 1); +} + +static void +bus_destroy(struct controlvm_message *inmsg) +{ + struct controlvm_message_packet *cmd = &inmsg->cmd; + ulong bus_no = cmd->destroy_bus.bus_no; + struct visorchipset_bus_info *bus_info; + int rc = CONTROLVM_RESP_SUCCESS; + + bus_info = findbus(&bus_info_list, bus_no); + if (!bus_info) + rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; + else if (bus_info->state.created == 0) + rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + + bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr, + rc, inmsg->hdr.flags.response_expected == 1); +} + +static void +bus_configure(struct controlvm_message *inmsg, + struct parser_context *parser_ctx) +{ + struct controlvm_message_packet *cmd = &inmsg->cmd; + ulong bus_no = cmd->configure_bus.bus_no; + struct visorchipset_bus_info *bus_info = NULL; + int rc = CONTROLVM_RESP_SUCCESS; + char s[99]; + + bus_no = cmd->configure_bus.bus_no; + POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no, + POSTCODE_SEVERITY_INFO); + + bus_info = findbus(&bus_info_list, bus_no); + if (!bus_info) { + POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; + } else if (bus_info->state.created == 0) { + POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; + } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) { + POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, + 
POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; + } else { + bus_info->partition_handle = cmd->configure_bus.guest_handle; + bus_info->partition_uuid = parser_id_get(parser_ctx); + parser_param_start(parser_ctx, PARSERSTRING_NAME); + bus_info->name = parser_string_get(parser_ctx); + + visorchannel_uuid_id(&bus_info->partition_uuid, s); + POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no, + POSTCODE_SEVERITY_INFO); + } + bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr, + rc, inmsg->hdr.flags.response_expected == 1); +} + +static void +my_device_create(struct controlvm_message *inmsg) +{ + struct controlvm_message_packet *cmd = &inmsg->cmd; + ulong bus_no = cmd->create_device.bus_no; + ulong dev_no = cmd->create_device.dev_no; + struct visorchipset_device_info *dev_info = NULL; + struct visorchipset_bus_info *bus_info = NULL; + int rc = CONTROLVM_RESP_SUCCESS; + + dev_info = finddevice(&dev_info_list, bus_no, dev_no); + if (dev_info && (dev_info->state.created == 1)) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + goto cleanup; + } + bus_info = findbus(&bus_info_list, bus_no); + if (!bus_info) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; + goto cleanup; + } + if (bus_info->state.created == 0) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; + goto cleanup; + } + dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); + if (!dev_info) { + POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED; + goto cleanup; + } + + INIT_LIST_HEAD(&dev_info->entry); + dev_info->bus_no = bus_no; + dev_info->dev_no = dev_no; + dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid; + POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no, + POSTCODE_SEVERITY_INFO); + + if (inmsg->hdr.flags.test_message == 1) + dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST; + else + dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL; + dev_info->chan_info.channel_addr = cmd->create_device.channel_addr; + dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes; + dev_info->chan_info.channel_type_uuid = + cmd->create_device.data_type_uuid; + dev_info->chan_info.intr = cmd->create_device.intr; + list_add(&dev_info->entry, &dev_info_list); + POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no, + POSTCODE_SEVERITY_INFO); +cleanup: + /* get the bus and devNo for DiagPool channel */ + if (dev_info && + is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) { + g_diagpool_bus_no = bus_no; + g_diagpool_dev_no = dev_no; + } + device_epilog(bus_no, dev_no, segment_state_running, + CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc, + inmsg->hdr.flags.response_expected == 1, + FOR_VISORBUS(dev_info->chan_info.channel_type_uuid)); +} + +static void +my_device_changestate(struct controlvm_message *inmsg) +{ + struct controlvm_message_packet *cmd = &inmsg->cmd; + ulong bus_no = cmd->device_change_state.bus_no; + ulong dev_no = cmd->device_change_state.dev_no; + struct spar_segment_state state = cmd->device_change_state.state; + struct visorchipset_device_info *dev_info = NULL; + int rc = CONTROLVM_RESP_SUCCESS; + + dev_info = finddevice(&dev_info_list, bus_no, dev_no); + if (!dev_info) { + POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, + 
POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; + } else if (dev_info->state.created == 0) { + POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no, + POSTCODE_SEVERITY_ERR); + rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; + } + if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info) + device_epilog(bus_no, dev_no, state, + CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc, + inmsg->hdr.flags.response_expected == 1, + FOR_VISORBUS( + dev_info->chan_info.channel_type_uuid)); +} + +static void +my_device_destroy(struct controlvm_message *inmsg) +{ + struct controlvm_message_packet *cmd = &inmsg->cmd; + ulong bus_no = cmd->destroy_device.bus_no; + ulong dev_no = cmd->destroy_device.dev_no; + struct visorchipset_device_info *dev_info = NULL; + int rc = CONTROLVM_RESP_SUCCESS; + + dev_info = finddevice(&dev_info_list, bus_no, dev_no); + if (!dev_info) + rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID; + else if (dev_info->state.created == 0) + rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE; + + if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info) + device_epilog(bus_no, dev_no, segment_state_running, + CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc, + inmsg->hdr.flags.response_expected == 1, + FOR_VISORBUS( + dev_info->chan_info.channel_type_uuid)); +} + +/* When provided with the physical address of the controlvm channel + * (phys_addr), the offset to the payload area we need to manage + * (offset), and the size of this payload area (bytes), fills in the + * controlvm_payload_info struct. Returns TRUE for success or FALSE + * for failure. + */ +static int +initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes, + struct controlvm_payload_info *info) +{ + u8 __iomem *payload = NULL; + int rc = CONTROLVM_RESP_SUCCESS; + + if (!info) { + rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; + goto cleanup; + } + memset(info, 0, sizeof(struct controlvm_payload_info)); + if ((offset == 0) || (bytes == 0)) { + rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; + goto cleanup; + } + payload = ioremap_cache(phys_addr + offset, bytes); + if (!payload) { + rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED; + goto cleanup; + } + + info->offset = offset; + info->bytes = bytes; + info->ptr = payload; + +cleanup: + if (rc < 0) { + if (payload) { + iounmap(payload); + payload = NULL; + } + } + return rc; +} + +static void +destroy_controlvm_payload_info(struct controlvm_payload_info *info) +{ + if (info->ptr) { + iounmap(info->ptr); + info->ptr = NULL; + } + memset(info, 0, sizeof(struct controlvm_payload_info)); +} + +static void +initialize_controlvm_payload(void) +{ + HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel); + u64 payload_offset = 0; + u32 payload_bytes = 0; + + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + request_payload_offset), + &payload_offset, sizeof(payload_offset)) < 0) { + POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + request_payload_bytes), + &payload_bytes, sizeof(payload_bytes)) < 0) { + POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + initialize_controlvm_payload_info(phys_addr, + payload_offset, payload_bytes, + &controlvm_payload_info); +} + +/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset. + * Returns CONTROLVM_RESP_xxx code. 
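+ * (visorchipset_chipset_ready() always reports CONTROLVM_RESP_SUCCESS here; its
+ * caller, chipset_ready(), negates any nonzero code before responding.)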
+ */ +int +visorchipset_chipset_ready(void) +{ + kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE); + return CONTROLVM_RESP_SUCCESS; +} +EXPORT_SYMBOL_GPL(visorchipset_chipset_ready); + +int +visorchipset_chipset_selftest(void) +{ + char env_selftest[20]; + char *envp[] = { env_selftest, NULL }; + + sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1); + kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE, + envp); + return CONTROLVM_RESP_SUCCESS; +} +EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest); + +/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset. + * Returns CONTROLVM_RESP_xxx code. + */ +int +visorchipset_chipset_notready(void) +{ + kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE); + return CONTROLVM_RESP_SUCCESS; +} +EXPORT_SYMBOL_GPL(visorchipset_chipset_notready); + +static void +chipset_ready(struct controlvm_message_header *msg_hdr) +{ + int rc = visorchipset_chipset_ready(); + + if (rc != CONTROLVM_RESP_SUCCESS) + rc = -rc; + if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready) + controlvm_respond(msg_hdr, rc); + if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) { + /* Send CHIPSET_READY response when all modules have been loaded + * and disks mounted for the partition + */ + g_chipset_msg_hdr = *msg_hdr; + } +} + +static void +chipset_selftest(struct controlvm_message_header *msg_hdr) +{ + int rc = visorchipset_chipset_selftest(); + + if (rc != CONTROLVM_RESP_SUCCESS) + rc = -rc; + if (msg_hdr->flags.response_expected) + controlvm_respond(msg_hdr, rc); +} + +static void +chipset_notready(struct controlvm_message_header *msg_hdr) +{ + int rc = visorchipset_chipset_notready(); + + if (rc != CONTROLVM_RESP_SUCCESS) + rc = -rc; + if (msg_hdr->flags.response_expected) + controlvm_respond(msg_hdr, rc); +} + +/* This is your "one-stop" shop for grabbing the next message from the + * CONTROLVM_QUEUE_EVENT queue in the controlvm channel. + */ +static BOOL +read_controlvm_event(struct controlvm_message *msg) +{ + if (visorchannel_signalremove(controlvm_channel, + CONTROLVM_QUEUE_EVENT, msg)) { + /* got a message */ + if (msg->hdr.flags.test_message == 1) + return FALSE; + return TRUE; + } + return FALSE; +} + +/* + * The general parahotplug flow works as follows. The visorchipset + * driver receives a DEVICE_CHANGESTATE message from Command + * specifying a physical device to enable or disable. The CONTROLVM + * message handler calls parahotplug_process_message, which then adds + * the message to a global list and kicks off a udev event which + * causes a user level script to enable or disable the specified + * device. The udev script then writes to + * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write + * to get called, at which point the appropriate CONTROLVM message is + * retrieved from the list and responded to. 
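+ * Enable requests are acknowledged right away by parahotplug_process_message();
+ * disable requests sit on parahotplug_request_list until the script answers via
+ * the proc interface or the request expires after PARAHOTPLUG_TIMEOUT_MS.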
+ */ + +#define PARAHOTPLUG_TIMEOUT_MS 2000 + +/* + * Generate unique int to match an outstanding CONTROLVM message with a + * udev script /proc response + */ +static int +parahotplug_next_id(void) +{ + static atomic_t id = ATOMIC_INIT(0); + + return atomic_inc_return(&id); +} + +/* + * Returns the time (in jiffies) when a CONTROLVM message on the list + * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future + */ +static unsigned long +parahotplug_next_expiration(void) +{ + return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS); +} + +/* + * Create a parahotplug_request, which is basically a wrapper for a + * CONTROLVM_MESSAGE that we can stick on a list + */ +static struct parahotplug_request * +parahotplug_request_create(struct controlvm_message *msg) +{ + struct parahotplug_request *req; + + req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY); + if (!req) + return NULL; + + req->id = parahotplug_next_id(); + req->expiration = parahotplug_next_expiration(); + req->msg = *msg; + + return req; +} + +/* + * Free a parahotplug_request. + */ +static void +parahotplug_request_destroy(struct parahotplug_request *req) +{ + kfree(req); +} + +/* + * Cause uevent to run the user level script to do the disable/enable + * specified in (the CONTROLVM message in) the specified + * parahotplug_request + */ +static void +parahotplug_request_kickoff(struct parahotplug_request *req) +{ + struct controlvm_message_packet *cmd = &req->msg.cmd; + char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40], + env_func[40]; + char *envp[] = { + env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL + }; + + sprintf(env_cmd, "SPAR_PARAHOTPLUG=1"); + sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id); + sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d", + cmd->device_change_state.state.active); + sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d", + cmd->device_change_state.bus_no); + sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d", + cmd->device_change_state.dev_no >> 3); + sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d", + cmd->device_change_state.dev_no & 0x7); + + kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE, + envp); +} + +/* + * Remove any request from the list that's been on there too long and + * respond with an error. + */ +static void +parahotplug_process_list(void) +{ + struct list_head *pos = NULL; + struct list_head *tmp = NULL; + + spin_lock(¶hotplug_request_list_lock); + + list_for_each_safe(pos, tmp, ¶hotplug_request_list) { + struct parahotplug_request *req = + list_entry(pos, struct parahotplug_request, list); + + if (!time_after_eq(jiffies, req->expiration)) + continue; + + list_del(pos); + if (req->msg.hdr.flags.response_expected) + controlvm_respond_physdev_changestate( + &req->msg.hdr, + CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT, + req->msg.cmd.device_change_state.state); + parahotplug_request_destroy(req); + } + + spin_unlock(¶hotplug_request_list_lock); +} + +/* + * Called from the /proc handler, which means the user script has + * finished the enable/disable. Find the matching identifier, and + * respond to the CONTROLVM message with success. + */ +static int +parahotplug_request_complete(int id, u16 active) +{ + struct list_head *pos = NULL; + struct list_head *tmp = NULL; + + spin_lock(¶hotplug_request_list_lock); + + /* Look for a request matching "id". */ + list_for_each_safe(pos, tmp, ¶hotplug_request_list) { + struct parahotplug_request *req = + list_entry(pos, struct parahotplug_request, list); + if (req->id == id) { + /* Found a match. 
Remove it from the list and + * respond. + */ + list_del(pos); + spin_unlock(¶hotplug_request_list_lock); + req->msg.cmd.device_change_state.state.active = active; + if (req->msg.hdr.flags.response_expected) + controlvm_respond_physdev_changestate( + &req->msg.hdr, CONTROLVM_RESP_SUCCESS, + req->msg.cmd.device_change_state.state); + parahotplug_request_destroy(req); + return 0; + } + } + + spin_unlock(¶hotplug_request_list_lock); + return -1; +} + +/* + * Enables or disables a PCI device by kicking off a udev script + */ +static void +parahotplug_process_message(struct controlvm_message *inmsg) +{ + struct parahotplug_request *req; + + req = parahotplug_request_create(inmsg); + + if (!req) + return; + + if (inmsg->cmd.device_change_state.state.active) { + /* For enable messages, just respond with success + * right away. This is a bit of a hack, but there are + * issues with the early enable messages we get (with + * either the udev script not detecting that the device + * is up, or not getting called at all). Fortunately + * the messages that get lost don't matter anyway, as + * devices are automatically enabled at + * initialization. + */ + parahotplug_request_kickoff(req); + controlvm_respond_physdev_changestate(&inmsg->hdr, + CONTROLVM_RESP_SUCCESS, + inmsg->cmd.device_change_state.state); + parahotplug_request_destroy(req); + } else { + /* For disable messages, add the request to the + * request list before kicking off the udev script. It + * won't get responded to until the script has + * indicated it's done. + */ + spin_lock(¶hotplug_request_list_lock); + list_add_tail(&req->list, ¶hotplug_request_list); + spin_unlock(¶hotplug_request_list_lock); + + parahotplug_request_kickoff(req); + } +} + +/* Process a controlvm message. + * Return result: + * FALSE - this function will return FALSE only in the case where the + * controlvm message was NOT processed, but processing must be + * retried before reading the next controlvm message; a + * scenario where this can occur is when we need to throttle + * the allocation of memory in which to copy out controlvm + * payload data + * TRUE - processing of the controlvm message completed, + * either successfully or with an error. + */ +static BOOL +handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr) +{ + struct controlvm_message_packet *cmd = &inmsg.cmd; + u64 parm_addr = 0; + u32 parm_bytes = 0; + struct parser_context *parser_ctx = NULL; + bool local_addr = false; + struct controlvm_message ackmsg; + + /* create parsing context if necessary */ + local_addr = (inmsg.hdr.flags.test_message == 1); + if (channel_addr == 0) + return TRUE; + parm_addr = channel_addr + inmsg.hdr.payload_vm_offset; + parm_bytes = inmsg.hdr.payload_bytes; + + /* Parameter and channel addresses within test messages actually lie + * within our OS-controlled memory. We need to know that, because it + * makes a difference in how we compute the virtual address. 
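+ * (local_addr captures hdr.flags.test_message and is handed to
+ * parser_init_byte_stream() below.)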
+ */ + if (parm_addr != 0 && parm_bytes != 0) { + BOOL retry = FALSE; + + parser_ctx = + parser_init_byte_stream(parm_addr, parm_bytes, + local_addr, &retry); + if (!parser_ctx && retry) + return FALSE; + } + + if (!local_addr) { + controlvm_init_response(&ackmsg, &inmsg.hdr, + CONTROLVM_RESP_SUCCESS); + if (controlvm_channel) + visorchannel_signalinsert(controlvm_channel, + CONTROLVM_QUEUE_ACK, + &ackmsg); + } + switch (inmsg.hdr.id) { + case CONTROLVM_CHIPSET_INIT: + chipset_init(&inmsg); + break; + case CONTROLVM_BUS_CREATE: + bus_create(&inmsg); + break; + case CONTROLVM_BUS_DESTROY: + bus_destroy(&inmsg); + break; + case CONTROLVM_BUS_CONFIGURE: + bus_configure(&inmsg, parser_ctx); + break; + case CONTROLVM_DEVICE_CREATE: + my_device_create(&inmsg); + break; + case CONTROLVM_DEVICE_CHANGESTATE: + if (cmd->device_change_state.flags.phys_device) { + parahotplug_process_message(&inmsg); + } else { + /* save the hdr and cmd structures for later use */ + /* when sending back the response to Command */ + my_device_changestate(&inmsg); + g_diag_msg_hdr = inmsg.hdr; + g_devicechangestate_packet = inmsg.cmd; + break; + } + break; + case CONTROLVM_DEVICE_DESTROY: + my_device_destroy(&inmsg); + break; + case CONTROLVM_DEVICE_CONFIGURE: + /* no op for now, just send a respond that we passed */ + if (inmsg.hdr.flags.response_expected) + controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS); + break; + case CONTROLVM_CHIPSET_READY: + chipset_ready(&inmsg.hdr); + break; + case CONTROLVM_CHIPSET_SELFTEST: + chipset_selftest(&inmsg.hdr); + break; + case CONTROLVM_CHIPSET_STOP: + chipset_notready(&inmsg.hdr); + break; + default: + if (inmsg.hdr.flags.response_expected) + controlvm_respond(&inmsg.hdr, + -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN); + break; + } + + if (parser_ctx) { + parser_done(parser_ctx); + parser_ctx = NULL; + } + return TRUE; +} + +static HOSTADDRESS controlvm_get_channel_address(void) +{ + u64 addr = 0; + u32 size = 0; + + if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size))) + return 0; + + return addr; +} + +static void +controlvm_periodic_work(struct work_struct *work) +{ + struct controlvm_message inmsg; + BOOL got_command = FALSE; + BOOL handle_command_failed = FALSE; + static u64 poll_count; + + /* make sure visorbus server is registered for controlvm callbacks */ + if (visorchipset_serverregwait && !serverregistered) + goto cleanup; + /* make sure visorclientbus server is regsitered for controlvm + * callbacks + */ + if (visorchipset_clientregwait && !clientregistered) + goto cleanup; + + poll_count++; + if (poll_count >= 250) + ; /* keep going */ + else + goto cleanup; + + /* Check events to determine if response to CHIPSET_READY + * should be sent + */ + if (visorchipset_holdchipsetready && + (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) { + if (check_chipset_events() == 1) { + controlvm_respond(&g_chipset_msg_hdr, 0); + clear_chipset_events(); + memset(&g_chipset_msg_hdr, 0, + sizeof(struct controlvm_message_header)); + } + } + + while (visorchannel_signalremove(controlvm_channel, + CONTROLVM_QUEUE_RESPONSE, + &inmsg)) + ; + if (!got_command) { + if (controlvm_pending_msg_valid) { + /* we throttled processing of a prior + * msg, so try to process it again + * rather than reading a new one + */ + inmsg = controlvm_pending_msg; + controlvm_pending_msg_valid = FALSE; + got_command = true; + } else { + got_command = read_controlvm_event(&inmsg); + } + } + + handle_command_failed = FALSE; + while (got_command && (!handle_command_failed)) { + 
most_recent_message_jiffies = jiffies; + if (handle_command(inmsg, + visorchannel_get_physaddr + (controlvm_channel))) + got_command = read_controlvm_event(&inmsg); + else { + /* this is a scenario where throttling + * is required, but probably NOT an + * error...; we stash the current + * controlvm msg so we will attempt to + * reprocess it on our next loop + */ + handle_command_failed = TRUE; + controlvm_pending_msg = inmsg; + controlvm_pending_msg_valid = TRUE; + } + } + + /* parahotplug_worker */ + parahotplug_process_list(); + +cleanup: + + if (time_after(jiffies, + most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) { + /* it's been longer than MIN_IDLE_SECONDS since we + * processed our last controlvm message; slow down the + * polling + */ + if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW) + poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW; + } else { + if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST) + poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST; + } + + queue_delayed_work(periodic_controlvm_workqueue, + &periodic_controlvm_work, poll_jiffies); +} + +static void +setup_crash_devices_work_queue(struct work_struct *work) +{ + struct controlvm_message local_crash_bus_msg; + struct controlvm_message local_crash_dev_msg; + struct controlvm_message msg; + u32 local_crash_msg_offset; + u16 local_crash_msg_count; + + /* make sure visorbus server is registered for controlvm callbacks */ + if (visorchipset_serverregwait && !serverregistered) + goto cleanup; + + /* make sure visorclientbus server is regsitered for controlvm + * callbacks + */ + if (visorchipset_clientregwait && !clientregistered) + goto cleanup; + + POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO); + + /* send init chipset msg */ + msg.hdr.id = CONTROLVM_CHIPSET_INIT; + msg.cmd.init_chipset.bus_count = 23; + msg.cmd.init_chipset.switch_count = 0; + + chipset_init(&msg); + + /* get saved message count */ + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_count), + &local_crash_msg_count, sizeof(u16)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { + POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, + local_crash_msg_count, + POSTCODE_SEVERITY_ERR); + return; + } + + /* get saved crash message offset */ + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_offset), + &local_crash_msg_offset, sizeof(u32)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + /* read create device message for storage bus offset */ + if (visorchannel_read(controlvm_channel, + local_crash_msg_offset, + &local_crash_bus_msg, + sizeof(struct controlvm_message)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + /* read create device message for storage device */ + if (visorchannel_read(controlvm_channel, + local_crash_msg_offset + + sizeof(struct controlvm_message), + &local_crash_dev_msg, + sizeof(struct controlvm_message)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + /* reuse IOVM create bus message */ + if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) { + bus_create(&local_crash_bus_msg); + } else { + POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + /* reuse create device message for storage 
device */ + if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) { + my_device_create(&local_crash_dev_msg); + } else { + POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO); + return; + +cleanup: + + poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW; + + queue_delayed_work(periodic_controlvm_workqueue, + &periodic_controlvm_work, poll_jiffies); +} + +static void +bus_create_response(ulong bus_no, int response) +{ + bus_responder(CONTROLVM_BUS_CREATE, bus_no, response); +} + +static void +bus_destroy_response(ulong bus_no, int response) +{ + bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response); +} + +static void +device_create_response(ulong bus_no, ulong dev_no, int response) +{ + device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response); +} + +static void +device_destroy_response(ulong bus_no, ulong dev_no, int response) +{ + device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response); +} + +void +visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response) +{ + device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, + bus_no, dev_no, response, + segment_state_standby); +} +EXPORT_SYMBOL_GPL(visorchipset_device_pause_response); + +static void +device_resume_response(ulong bus_no, ulong dev_no, int response) +{ + device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, + bus_no, dev_no, response, + segment_state_running); +} + +BOOL +visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info) +{ + void *p = findbus(&bus_info_list, bus_no); + + if (!p) + return FALSE; + memcpy(bus_info, p, sizeof(struct visorchipset_bus_info)); + return TRUE; +} +EXPORT_SYMBOL_GPL(visorchipset_get_bus_info); + +BOOL +visorchipset_set_bus_context(ulong bus_no, void *context) +{ + struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no); + + if (!p) + return FALSE; + p->bus_driver_context = context; + return TRUE; +} +EXPORT_SYMBOL_GPL(visorchipset_set_bus_context); + +BOOL +visorchipset_get_device_info(ulong bus_no, ulong dev_no, + struct visorchipset_device_info *dev_info) +{ + void *p = finddevice(&dev_info_list, bus_no, dev_no); + + if (!p) + return FALSE; + memcpy(dev_info, p, sizeof(struct visorchipset_device_info)); + return TRUE; +} +EXPORT_SYMBOL_GPL(visorchipset_get_device_info); + +BOOL +visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context) +{ + struct visorchipset_device_info *p = + finddevice(&dev_info_list, bus_no, dev_no); + + if (!p) + return FALSE; + p->bus_driver_context = context; + return TRUE; +} +EXPORT_SYMBOL_GPL(visorchipset_set_device_context); + +/* Generic wrapper function for allocating memory from a kmem_cache pool. + */ +void * +visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block, + char *fn, int ln) +{ + gfp_t gfp; + void *p; + + if (ok_to_block) + gfp = GFP_KERNEL; + else + gfp = GFP_ATOMIC; + /* __GFP_NORETRY means "ok to fail", meaning + * kmem_cache_alloc() can return NULL, implying the caller CAN + * cope with failure. If you do NOT specify __GFP_NORETRY, + * Linux will go to extreme measures to get memory for you + * (like, invoke oom killer), which will probably cripple the + * system. + */ + gfp |= __GFP_NORETRY; + p = kmem_cache_alloc(pool, gfp); + if (!p) + return NULL; + + atomic_inc(&visorchipset_cache_buffers_in_use); + return p; +} + +/* Generic wrapper function for freeing memory from a kmem_cache pool. 
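+ * Pairs with visorchipset_cache_alloc() above and keeps the
+ * visorchipset_cache_buffers_in_use counter balanced.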
+ */ +void +visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln) +{ + if (!p) + return; + + atomic_dec(&visorchipset_cache_buffers_in_use); + kmem_cache_free(pool, p); +} + +static ssize_t chipsetready_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char msgtype[64]; + + if (sscanf(buf, "%63s", msgtype) != 1) + return -EINVAL; + + if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) { + chipset_events[0] = 1; + return count; + } else if (strcmp(msgtype, "MODULES_LOADED") == 0) { + chipset_events[1] = 1; + return count; + } + return -EINVAL; +} + +/* The parahotplug/devicedisabled interface gets called by our support script + * when an SR-IOV device has been shut down. The ID is passed to the script + * and then passed back when the device has been removed. + */ +static ssize_t devicedisabled_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + uint id; + + if (kstrtouint(buf, 10, &id) != 0) + return -EINVAL; + + parahotplug_request_complete(id, 0); + return count; +} + +/* The parahotplug/deviceenabled interface gets called by our support script + * when an SR-IOV device has been recovered. The ID is passed to the script + * and then passed back when the device has been brought back up. + */ +static ssize_t deviceenabled_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + uint id; + + if (kstrtouint(buf, 10, &id) != 0) + return -EINVAL; + + parahotplug_request_complete(id, 1); + return count; +} + +static int __init +visorchipset_init(void) +{ + int rc = 0, x = 0; + HOSTADDRESS addr; + + if (!unisys_spar_platform) + return -ENODEV; + + memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers)); + memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers)); + memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info)); + memset(&livedump_info, 0, sizeof(livedump_info)); + atomic_set(&livedump_info.buffers_in_use, 0); + + if (visorchipset_testvnic) { + POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR); + rc = x; + goto cleanup; + } + + addr = controlvm_get_channel_address(); + if (addr != 0) { + controlvm_channel = + visorchannel_create_with_lock + (addr, + sizeof(struct spar_controlvm_channel_protocol), + spar_controlvm_channel_protocol_uuid); + if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT( + visorchannel_get_header(controlvm_channel))) { + initialize_controlvm_payload(); + } else { + visorchannel_destroy(controlvm_channel); + controlvm_channel = NULL; + return -ENODEV; + } + } else { + return -ENODEV; + } + + major_dev = MKDEV(visorchipset_major, 0); + rc = visorchipset_file_init(major_dev, &controlvm_channel); + if (rc < 0) { + POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR); + goto cleanup; + } + + memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header)); + + memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header)); + + memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header)); + + putfile_buffer_list_pool = + kmem_cache_create(putfile_buffer_list_pool_name, + sizeof(struct putfile_buffer_entry), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!putfile_buffer_list_pool) { + POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR); + rc = -1; + goto cleanup; + } + if (!visorchipset_disable_controlvm) { + /* if booting in a crash kernel */ + if (visorchipset_crash_kernel) + INIT_DELAYED_WORK(&periodic_controlvm_work, + setup_crash_devices_work_queue); + else 
+ INIT_DELAYED_WORK(&periodic_controlvm_work, + controlvm_periodic_work); + periodic_controlvm_workqueue = + create_singlethread_workqueue("visorchipset_controlvm"); + + if (!periodic_controlvm_workqueue) { + POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC, + DIAG_SEVERITY_ERR); + rc = -ENOMEM; + goto cleanup; + } + most_recent_message_jiffies = jiffies; + poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST; + rc = queue_delayed_work(periodic_controlvm_workqueue, + &periodic_controlvm_work, poll_jiffies); + if (rc < 0) { + POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC, + DIAG_SEVERITY_ERR); + goto cleanup; + } + } + + visorchipset_platform_device.dev.devt = major_dev; + if (platform_device_register(&visorchipset_platform_device) < 0) { + POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR); + rc = -1; + goto cleanup; + } + POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO); + rc = 0; +cleanup: + if (rc) { + POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc, + POSTCODE_SEVERITY_ERR); + } + return rc; +} + +static void +visorchipset_exit(void) +{ + POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO); + + if (visorchipset_disable_controlvm) { + ; + } else { + cancel_delayed_work(&periodic_controlvm_work); + flush_workqueue(periodic_controlvm_workqueue); + destroy_workqueue(periodic_controlvm_workqueue); + periodic_controlvm_workqueue = NULL; + destroy_controlvm_payload_info(&controlvm_payload_info); + } + if (putfile_buffer_list_pool) { + kmem_cache_destroy(putfile_buffer_list_pool); + putfile_buffer_list_pool = NULL; + } + + cleanup_controlvm_structures(); + + memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header)); + + memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header)); + + memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header)); + + visorchannel_destroy(controlvm_channel); + + visorchipset_file_cleanup(visorchipset_platform_device.dev.devt); + POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO); +} + +module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet"); +int visorchipset_testvnic = 0; + +module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest"); +int visorchipset_testvnicclient = 0; + +module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_testmsg, + "1 to manufacture the chipset, bus, and switch messages"); +int visorchipset_testmsg = 0; + +module_param_named(major, visorchipset_major, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node"); +int visorchipset_major = 0; + +module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_serverreqwait, + "1 to have the module wait for the visor bus to register"); +int visorchipset_serverregwait = 0; /* default is off */ +module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register"); +int visorchipset_clientregwait = 1; /* default is on */ +module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_testteardown, + "1 to test teardown of the chipset, bus, and switch"); +int visorchipset_testteardown = 
0; /* default is off */ +module_param_named(disable_controlvm, visorchipset_disable_controlvm, int, + S_IRUGO); +MODULE_PARM_DESC(visorchipset_disable_controlvm, + "1 to disable polling of controlVm channel"); +int visorchipset_disable_controlvm = 0; /* default is off */ +module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_crash_kernel, + "1 means we are running in crash kernel"); +int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */ +module_param_named(holdchipsetready, visorchipset_holdchipsetready, + int, S_IRUGO); +MODULE_PARM_DESC(visorchipset_holdchipsetready, + "1 to hold response to CHIPSET_READY"); +int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY + * response immediately */ +module_init(visorchipset_init); +module_exit(visorchipset_exit); + +MODULE_AUTHOR("Unisys"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver " + VERSION); +MODULE_VERSION(VERSION); diff --git a/kernel/drivers/staging/unisys/visorchipset/visorchipset_umode.h b/kernel/drivers/staging/unisys/visorchipset/visorchipset_umode.h new file mode 100644 index 000000000..6cf6eccb3 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorchipset/visorchipset_umode.h @@ -0,0 +1,35 @@ +/* visorchipset_umode.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/** @file ********************************************************************* + * + * This describes structures needed for the interface between the + * visorchipset driver and a user-mode component that opens the device. + * + ****************************************************************************** + */ + +#ifndef __VISORCHIPSET_UMODE_H +#define __VISORCHIPSET_UMODE_H + +/** The user-mode program can access the control channel buffer directly + * via this memory map. + */ +#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET (0x00000000) +#define VISORCHIPSET_MMAP_CONTROLCHANSIZE (0x00400000) /* 4MB */ + +#endif /* __VISORCHIPSET_UMODE_H */ diff --git a/kernel/drivers/staging/unisys/visorutil/Kconfig b/kernel/drivers/staging/unisys/visorutil/Kconfig new file mode 100644 index 000000000..be9c2cf89 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/Kconfig @@ -0,0 +1,9 @@ +# +# Unisys timskmod configuration +# + +config UNISYS_VISORUTIL + tristate "Unisys visorutil driver" + ---help--- + If you say Y here, you will enable the Unisys visorutil driver. 
+ diff --git a/kernel/drivers/staging/unisys/visorutil/Makefile b/kernel/drivers/staging/unisys/visorutil/Makefile new file mode 100644 index 000000000..d9ab5a36e --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for Unisys timskmod +# + +obj-$(CONFIG_UNISYS_VISORUTIL) += visorutil.o + +visorutil-y := charqueue.o periodic_work.o memregion_direct.o visorkmodutils.o + +ccflags-y += -Idrivers/staging/unisys/include diff --git a/kernel/drivers/staging/unisys/visorutil/charqueue.c b/kernel/drivers/staging/unisys/visorutil/charqueue.c new file mode 100644 index 000000000..c91752a2d --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/charqueue.c @@ -0,0 +1,127 @@ +/* charqueue.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * Simple character queue implementation for Linux kernel mode. + */ + +#include "charqueue.h" + +#define MYDRVNAME "charqueue" + +#define IS_EMPTY(charqueue) (charqueue->head == charqueue->tail) + +struct charqueue { + int alloc_size; + int nslots; + spinlock_t lock; /* read/write lock for this structure */ + int head, tail; + unsigned char buf[0]; +}; + +struct charqueue *visor_charqueue_create(ulong nslots) +{ + int alloc_size = sizeof(struct charqueue) + nslots + 1; + struct charqueue *cq; + + cq = kmalloc(alloc_size, GFP_KERNEL|__GFP_NORETRY); + if (cq == NULL) + return NULL; + cq->alloc_size = alloc_size; + cq->nslots = nslots; + cq->head = 0; + cq->tail = 0; + spin_lock_init(&cq->lock); + return cq; +} +EXPORT_SYMBOL_GPL(visor_charqueue_create); + +void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c) +{ + int alloc_slots = charqueue->nslots+1; /* 1 slot is always empty */ + + spin_lock(&charqueue->lock); + charqueue->head = (charqueue->head+1) % alloc_slots; + if (charqueue->head == charqueue->tail) + /* overflow; overwrite the oldest entry */ + charqueue->tail = (charqueue->tail+1) % alloc_slots; + charqueue->buf[charqueue->head] = c; + spin_unlock(&charqueue->lock); +} +EXPORT_SYMBOL_GPL(visor_charqueue_enqueue); + +BOOL visor_charqueue_is_empty(struct charqueue *charqueue) +{ + BOOL b; + + spin_lock(&charqueue->lock); + b = IS_EMPTY(charqueue); + spin_unlock(&charqueue->lock); + return b; +} +EXPORT_SYMBOL_GPL(visor_charqueue_is_empty); + +static int charqueue_dequeue_1(struct charqueue *charqueue) +{ + int alloc_slots = charqueue->nslots + 1; /* 1 slot is always empty */ + + if (IS_EMPTY(charqueue)) + return -1; + charqueue->tail = (charqueue->tail+1) % alloc_slots; + return charqueue->buf[charqueue->tail]; +} + +int charqueue_dequeue(struct charqueue *charqueue) +{ + int rc; + + spin_lock(&charqueue->lock); + rc = charqueue_dequeue_1(charqueue); + spin_unlock(&charqueue->lock); + return rc; +} + +int visor_charqueue_dequeue_n(struct charqueue *charqueue, unsigned char *buf, + int n) +{ + int rc, counter = 0, c; + + spin_lock(&charqueue->lock); + for (;;) { + if (n <= 0) + break; /* no more buffer space */ 
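+ /* pull one character; charqueue_dequeue_1() returns -1 once the queue drains */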
+ c = charqueue_dequeue_1(charqueue); + if (c < 0) + break; /* no more input */ + *buf = (unsigned char)(c); + buf++; + n--; + counter++; + } + rc = counter; + spin_unlock(&charqueue->lock); + return rc; +} +EXPORT_SYMBOL_GPL(visor_charqueue_dequeue_n); + +void visor_charqueue_destroy(struct charqueue *charqueue) +{ + if (charqueue == NULL) + return; + kfree(charqueue); +} +EXPORT_SYMBOL_GPL(visor_charqueue_destroy); diff --git a/kernel/drivers/staging/unisys/visorutil/charqueue.h b/kernel/drivers/staging/unisys/visorutil/charqueue.h new file mode 100644 index 000000000..f46a776b9 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/charqueue.h @@ -0,0 +1,37 @@ +/* charqueue.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __CHARQUEUE_H__ +#define __CHARQUEUE_H__ + +#include "timskmod.h" + +/* struct charqueue is an opaque structure to users. + * Fields are declared only in the implementation .c files. + */ +struct charqueue; + +struct charqueue *visor_charqueue_create(ulong nslots); +void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c); +int charqueue_dequeue(struct charqueue *charqueue); +int visor_charqueue_dequeue_n(struct charqueue *charqueue, unsigned char *buf, + int n); +BOOL visor_charqueue_is_empty(struct charqueue *charqueue); +void visor_charqueue_destroy(struct charqueue *charqueue); + +#endif + diff --git a/kernel/drivers/staging/unisys/visorutil/memregion.h b/kernel/drivers/staging/unisys/visorutil/memregion.h new file mode 100644 index 000000000..0c3eebcf6 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/memregion.h @@ -0,0 +1,43 @@ +/* memregion.h + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#ifndef __MEMREGION_H__ +#define __MEMREGION_H__ + +#include "timskmod.h" + +/* struct memregion is an opaque structure to users. + * Fields are declared only in the implementation .c files. 
+ */ +struct memregion; + +struct memregion *visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes); +struct memregion *visor_memregion_create_overlapped(struct memregion *parent, + ulong offset, ulong nbytes); +int visor_memregion_resize(struct memregion *memregion, ulong newsize); +int visor_memregion_read(struct memregion *memregion, + ulong offset, void *dest, ulong nbytes); +int visor_memregion_write(struct memregion *memregion, + ulong offset, void *src, ulong nbytes); +void visor_memregion_destroy(struct memregion *memregion); +HOSTADDRESS visor_memregion_get_physaddr(struct memregion *memregion); +ulong visor_memregion_get_nbytes(struct memregion *memregion); +void memregion_dump(struct memregion *memregion, char *s, + ulong off, ulong len, struct seq_file *seq); +void __iomem *visor_memregion_get_pointer(struct memregion *memregion); + +#endif diff --git a/kernel/drivers/staging/unisys/visorutil/memregion_direct.c b/kernel/drivers/staging/unisys/visorutil/memregion_direct.c new file mode 100644 index 000000000..eb7422fbe --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/memregion_direct.c @@ -0,0 +1,207 @@ +/* memregion_direct.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * This is an implementation of memory regions that can be used to read/write + * channel memory (in main memory of the host system) from code running in + * a virtual partition. 
+ */ +#include "timskmod.h" +#include "memregion.h" + +#define MYDRVNAME "memregion" + +struct memregion { + HOSTADDRESS physaddr; + ulong nbytes; + void __iomem *mapped; + BOOL requested; + BOOL overlapped; +}; + +static BOOL mapit(struct memregion *memregion); +static void unmapit(struct memregion *memregion); + +struct memregion * +visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes) +{ + struct memregion *rc = NULL; + struct memregion *memregion; + + memregion = kzalloc(sizeof(*memregion), GFP_KERNEL | __GFP_NORETRY); + if (memregion == NULL) + return NULL; + + memregion->physaddr = physaddr; + memregion->nbytes = nbytes; + memregion->overlapped = FALSE; + if (!mapit(memregion)) { + rc = NULL; + goto cleanup; + } + rc = memregion; +cleanup: + if (rc == NULL) { + visor_memregion_destroy(memregion); + memregion = NULL; + } + return rc; +} +EXPORT_SYMBOL_GPL(visor_memregion_create); + +struct memregion * +visor_memregion_create_overlapped(struct memregion *parent, ulong offset, + ulong nbytes) +{ + struct memregion *memregion = NULL; + + if (parent == NULL) + return NULL; + + if (parent->mapped == NULL) + return NULL; + + if ((offset >= parent->nbytes) || + ((offset + nbytes) >= parent->nbytes)) + return NULL; + + memregion = kzalloc(sizeof(*memregion), GFP_KERNEL|__GFP_NORETRY); + if (memregion == NULL) + return NULL; + + memregion->physaddr = parent->physaddr + offset; + memregion->nbytes = nbytes; + memregion->mapped = ((u8 __iomem *)(parent->mapped)) + offset; + memregion->requested = FALSE; + memregion->overlapped = TRUE; + return memregion; +} +EXPORT_SYMBOL_GPL(visor_memregion_create_overlapped); + +static BOOL +mapit(struct memregion *memregion) +{ + ulong physaddr = (ulong)(memregion->physaddr); + ulong nbytes = memregion->nbytes; + + memregion->requested = FALSE; + if (request_mem_region(physaddr, nbytes, MYDRVNAME)) + memregion->requested = TRUE; + memregion->mapped = ioremap_cache(physaddr, nbytes); + if (!memregion->mapped) + return FALSE; + return TRUE; +} + +static void +unmapit(struct memregion *memregion) +{ + if (memregion->mapped != NULL) { + iounmap(memregion->mapped); + memregion->mapped = NULL; + } + if (memregion->requested) { + release_mem_region((ulong)(memregion->physaddr), + memregion->nbytes); + memregion->requested = FALSE; + } +} + +HOSTADDRESS +visor_memregion_get_physaddr(struct memregion *memregion) +{ + return memregion->physaddr; +} +EXPORT_SYMBOL_GPL(visor_memregion_get_physaddr); + +ulong +visor_memregion_get_nbytes(struct memregion *memregion) +{ + return memregion->nbytes; +} +EXPORT_SYMBOL_GPL(visor_memregion_get_nbytes); + +void __iomem * +visor_memregion_get_pointer(struct memregion *memregion) +{ + return memregion->mapped; +} +EXPORT_SYMBOL_GPL(visor_memregion_get_pointer); + +int +visor_memregion_resize(struct memregion *memregion, ulong newsize) +{ + if (newsize == memregion->nbytes) + return 0; + if (memregion->overlapped) + /* no error check here - we no longer know the + * parent's range! 
+ */ + memregion->nbytes = newsize; + else { + unmapit(memregion); + memregion->nbytes = newsize; + if (!mapit(memregion)) + return -1; + } + return 0; +} +EXPORT_SYMBOL_GPL(visor_memregion_resize); + +static int +memregion_readwrite(BOOL is_write, + struct memregion *memregion, ulong offset, + void *local, ulong nbytes) +{ + if (offset + nbytes > memregion->nbytes) + return -EIO; + + if (is_write) + memcpy_toio(memregion->mapped + offset, local, nbytes); + else + memcpy_fromio(local, memregion->mapped + offset, nbytes); + + return 0; +} + +int +visor_memregion_read(struct memregion *memregion, ulong offset, void *dest, + ulong nbytes) +{ + return memregion_readwrite(FALSE, memregion, offset, dest, nbytes); +} +EXPORT_SYMBOL_GPL(visor_memregion_read); + +int +visor_memregion_write(struct memregion *memregion, ulong offset, void *src, + ulong nbytes) +{ + return memregion_readwrite(TRUE, memregion, offset, src, nbytes); +} +EXPORT_SYMBOL_GPL(visor_memregion_write); + +void +visor_memregion_destroy(struct memregion *memregion) +{ + if (memregion == NULL) + return; + if (!memregion->overlapped) + unmapit(memregion); + kfree(memregion); +} +EXPORT_SYMBOL_GPL(visor_memregion_destroy); + diff --git a/kernel/drivers/staging/unisys/visorutil/periodic_work.c b/kernel/drivers/staging/unisys/visorutil/periodic_work.c new file mode 100644 index 000000000..abbfb4889 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/periodic_work.c @@ -0,0 +1,204 @@ +/* periodic_work.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +/* + * Helper functions to schedule periodic work in Linux kernel mode. + */ + +#include "timskmod.h" +#include "periodic_work.h" + +#define MYDRVNAME "periodic_work" + +struct periodic_work { + rwlock_t lock; + struct delayed_work work; + void (*workfunc)(void *); + void *workfuncarg; + BOOL is_scheduled; + BOOL want_to_stop; + ulong jiffy_interval; + struct workqueue_struct *workqueue; + const char *devnam; +}; + +static void periodic_work_func(struct work_struct *work) +{ + struct periodic_work *pw; + + pw = container_of(work, struct periodic_work, work.work); + (*pw->workfunc)(pw->workfuncarg); +} + +struct periodic_work *visor_periodic_work_create(ulong jiffy_interval, + struct workqueue_struct *workqueue, + void (*workfunc)(void *), + void *workfuncarg, + const char *devnam) +{ + struct periodic_work *pw; + + pw = kzalloc(sizeof(*pw), GFP_KERNEL | __GFP_NORETRY); + if (!pw) + return NULL; + + rwlock_init(&pw->lock); + pw->jiffy_interval = jiffy_interval; + pw->workqueue = workqueue; + pw->workfunc = workfunc; + pw->workfuncarg = workfuncarg; + pw->devnam = devnam; + return pw; +} +EXPORT_SYMBOL_GPL(visor_periodic_work_create); + +void visor_periodic_work_destroy(struct periodic_work *pw) +{ + kfree(pw); +} +EXPORT_SYMBOL_GPL(visor_periodic_work_destroy); + +/** Call this from your periodic work worker function to schedule the next + * call. 
+ * If this function returns FALSE, there was a failure and the + * periodic work is no longer scheduled + */ +BOOL visor_periodic_work_nextperiod(struct periodic_work *pw) +{ + BOOL rc = FALSE; + + write_lock(&pw->lock); + if (pw->want_to_stop) { + pw->is_scheduled = FALSE; + pw->want_to_stop = FALSE; + rc = TRUE; /* yes, TRUE; see visor_periodic_work_stop() */ + goto unlock; + } else if (queue_delayed_work(pw->workqueue, &pw->work, + pw->jiffy_interval) < 0) { + pw->is_scheduled = FALSE; + rc = FALSE; + goto unlock; + } + rc = TRUE; +unlock: + write_unlock(&pw->lock); + return rc; +} +EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod); + +/** This function returns TRUE iff new periodic work was actually started. + * If this function returns FALSE, then no work was started + * (either because it was already started, or because of a failure). + */ +BOOL visor_periodic_work_start(struct periodic_work *pw) +{ + BOOL rc = FALSE; + + write_lock(&pw->lock); + if (pw->is_scheduled) { + rc = FALSE; + goto unlock; + } + if (pw->want_to_stop) { + rc = FALSE; + goto unlock; + } + INIT_DELAYED_WORK(&pw->work, &periodic_work_func); + if (queue_delayed_work(pw->workqueue, &pw->work, + pw->jiffy_interval) < 0) { + rc = FALSE; + goto unlock; + } + pw->is_scheduled = TRUE; + rc = TRUE; +unlock: + write_unlock(&pw->lock); + return rc; +} +EXPORT_SYMBOL_GPL(visor_periodic_work_start); + +/** This function returns TRUE iff your call actually stopped the periodic + * work. + * + * -- PAY ATTENTION... this is important -- + * + * NO NO #1 + * + * Do NOT call this function from some function that is running on the + * same workqueue as the work you are trying to stop might be running + * on! If you violate this rule, visor_periodic_work_stop() MIGHT work, + * but it also MIGHT get hung up in an infinite loop saying + * "waiting for delayed work...". This will happen if the delayed work + * you are trying to cancel has been put in the workqueue list, but can't + * run yet because we are running that same workqueue thread right now. + * + * Bottom line: If you need to call visor_periodic_work_stop() from a + * workitem, be sure the workitem is on a DIFFERENT workqueue than the + * workitem that you are trying to cancel. + * + * If I could figure out some way to check for this "no no" condition in + * the code, I would. It would have saved me the trouble of writing this + * long comment. And also, don't think this is some "theoretical" race + * condition. It is REAL, as I have spent the day chasing it. + * + * NO NO #2 + * + * Take close note of the locks that you own when you call this function. + * You must NOT own any locks that are needed by the periodic work + * function that is currently installed. If you DO, a deadlock may result, + * because stopping the periodic work often involves waiting for the last + * iteration of the periodic work function to complete. Again, if you hit + * this deadlock, you will get hung up in an infinite loop saying + * "waiting for delayed work...". + */ +BOOL visor_periodic_work_stop(struct periodic_work *pw) +{ + BOOL stopped_something = FALSE; + + write_lock(&pw->lock); + stopped_something = pw->is_scheduled && (!pw->want_to_stop); + while (pw->is_scheduled) { + pw->want_to_stop = TRUE; + if (cancel_delayed_work(&pw->work)) { + /* We get here if the delayed work was pending as + * delayed work, but was NOT run. 
+ */ + WARN_ON(!pw->is_scheduled); + pw->is_scheduled = FALSE; + } else { + /* If we get here, either the delayed work: + * - was run, OR, + * - is running RIGHT NOW on another processor, OR, + * - wasn't even scheduled (there is a miniscule + * timing window where this could be the case) + * flush_workqueue() would make sure it is finished + * executing, but that still isn't very useful, which + * explains the loop... + */ + } + if (pw->is_scheduled) { + write_unlock(&pw->lock); + SLEEPJIFFIES(10); + write_lock(&pw->lock); + } else { + pw->want_to_stop = FALSE; + } + } + write_unlock(&pw->lock); + return stopped_something; +} +EXPORT_SYMBOL_GPL(visor_periodic_work_stop); diff --git a/kernel/drivers/staging/unisys/visorutil/visorkmodutils.c b/kernel/drivers/staging/unisys/visorutil/visorkmodutils.c new file mode 100644 index 000000000..62f0f7046 --- /dev/null +++ b/kernel/drivers/staging/unisys/visorutil/visorkmodutils.c @@ -0,0 +1,71 @@ +/* timskmodutils.c + * + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + +#include "timskmod.h" + +#define MYDRVNAME "timskmodutils" + +/* s-Par uses the Intel processor's VT-X features to separate groups of + * processors into partitions. The firmware sets the hypervisor bit and + * reports an ID in the HV capabilities leaf so that the partition's OS + * knows s-Par is present and managing the processors. + */ + +#define UNISYS_SPAR_LEAF_ID 0x40000000 + +/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */ +#define UNISYS_SPAR_ID_EBX 0x73696e55 +#define UNISYS_SPAR_ID_ECX 0x70537379 +#define UNISYS_SPAR_ID_EDX 0x34367261 + +int unisys_spar_platform; +EXPORT_SYMBOL_GPL(unisys_spar_platform); + +static __init uint32_t visorutil_spar_detect(void) +{ + unsigned int eax, ebx, ecx, edx; + + if (cpu_has_hypervisor) { + /* check the ID */ + cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx); + return (ebx == UNISYS_SPAR_ID_EBX) && + (ecx == UNISYS_SPAR_ID_ECX) && + (edx == UNISYS_SPAR_ID_EDX); + } else { + return 0; + } +} + +static __init int visorutil_mod_init(void) +{ + if (visorutil_spar_detect()) { + unisys_spar_platform = TRUE; + return 0; + } else { + return -ENODEV; + } +} + +static __exit void +visorutil_mod_exit(void) +{ +} + +module_init(visorutil_mod_init); +module_exit(visorutil_mod_exit); + +MODULE_LICENSE("GPL"); -- cgit 1.2.3-korg
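
As a usage sketch only (this is not part of the imported code), the snippet below shows the intended call order for the periodic_work and charqueue helpers added above: create the objects, re-arm the work from inside the work function with visor_periodic_work_nextperiod(), and stop/destroy from a context outside the work's own workqueue, as the comments in periodic_work.c require. All example_* names, POLL_JIFFIES, and the module boilerplate are hypothetical; only the visor_* calls and their signatures come from the files added in this commit.

/* Hypothetical consumer module -- illustrates call order only. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "timskmod.h"
#include "periodic_work.h"
#include "charqueue.h"

#define POLL_JIFFIES msecs_to_jiffies(100)	/* example interval */

static struct workqueue_struct *example_wq;	/* hypothetical names */
static struct periodic_work *example_pw;
static struct charqueue *example_cq;

/* Work function: produce one byte per tick, then re-arm ourselves. */
static void example_poll(void *arg)
{
	struct charqueue *cq = arg;

	visor_charqueue_enqueue(cq, 'x');
	visor_periodic_work_nextperiod(example_pw);
}

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example_visor_user");
	if (!example_wq)
		return -ENOMEM;
	example_cq = visor_charqueue_create(128);
	example_pw = visor_periodic_work_create(POLL_JIFFIES, example_wq,
						example_poll, example_cq,
						"example_visor_user");
	if (!example_cq || !example_pw)
		goto err;
	if (!visor_periodic_work_start(example_pw))
		goto err;
	return 0;
err:
	/* both destroy helpers tolerate NULL */
	visor_periodic_work_destroy(example_pw);
	visor_charqueue_destroy(example_cq);
	destroy_workqueue(example_wq);
	return -ENOMEM;
}

static void __exit example_exit(void)
{
	/* Per the comment in periodic_work.c, never call _stop() from the
	 * workqueue the work runs on; module exit is a safe context.
	 */
	visor_periodic_work_stop(example_pw);
	visor_periodic_work_destroy(example_pw);
	visor_charqueue_destroy(example_cq);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");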