author    Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/scsi/megaraid
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/scsi/megaraid')
-rw-r--r--  kernel/drivers/scsi/megaraid/Kconfig.megaraid          85
-rw-r--r--  kernel/drivers/scsi/megaraid/Makefile                   5
-rw-r--r--  kernel/drivers/scsi/megaraid/mbox_defs.h              790
-rw-r--r--  kernel/drivers/scsi/megaraid/mega_common.h            290
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_ioctl.h         300
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_mbox.c         4145
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_mbox.h          238
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_mm.c           1263
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_mm.h            101
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_sas.h          1990
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_sas_base.c     6892
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_sas_fp.c       1360
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_sas_fusion.c   2993
-rw-r--r--  kernel/drivers/scsi/megaraid/megaraid_sas_fusion.h    850
14 files changed, 21302 insertions, 0 deletions
diff --git a/kernel/drivers/scsi/megaraid/Kconfig.megaraid b/kernel/drivers/scsi/megaraid/Kconfig.megaraid
new file mode 100644
index 000000000..17419e30f
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/Kconfig.megaraid
@@ -0,0 +1,85 @@
+config MEGARAID_NEWGEN
+ bool "LSI Logic New Generation RAID Device Drivers"
+ depends on PCI && SCSI
+ help
+ LSI Logic RAID Device Drivers
+
+config MEGARAID_MM
+ tristate "LSI Logic Management Module (New Driver)"
+ depends on PCI && SCSI && MEGARAID_NEWGEN
+ help
+ Management Module provides ioctl, sysfs support for LSI Logic
+ RAID controllers.
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid_mm
+
+
+config MEGARAID_MAILBOX
+ tristate "LSI Logic MegaRAID Driver (New Driver)"
+ depends on PCI && SCSI && MEGARAID_MM
+ help
+ List of supported controllers
+
+ OEM Product Name VID :DID :SVID:SSID
+ --- ------------ ---- ---- ---- ----
+ Dell PERC3/QC 101E:1960:1028:0471
+ Dell PERC3/DC 101E:1960:1028:0493
+ Dell PERC3/SC 101E:1960:1028:0475
+ Dell PERC3/Di 1028:000E:1028:0123
+ Dell PERC4/SC 1000:1960:1028:0520
+ Dell PERC4/DC 1000:1960:1028:0518
+ Dell PERC4/QC 1000:0407:1028:0531
+ Dell PERC4/Di 1028:000F:1028:014A
+ Dell PERC 4e/Si 1028:0013:1028:016c
+ Dell PERC 4e/Di 1028:0013:1028:016d
+ Dell PERC 4e/Di 1028:0013:1028:016e
+ Dell PERC 4e/Di 1028:0013:1028:016f
+ Dell PERC 4e/Di 1028:0013:1028:0170
+ Dell PERC 4e/DC 1000:0408:1028:0002
+ Dell PERC 4e/SC 1000:0408:1028:0001
+ LSI MegaRAID SCSI 320-0 1000:1960:1000:A520
+ LSI MegaRAID SCSI 320-1 1000:1960:1000:0520
+ LSI MegaRAID SCSI 320-2 1000:1960:1000:0518
+ LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530
+ LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532
+ LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531
+ LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001
+ LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002
+ LSI MegaRAID SATA 150-4 1000:1960:1000:4523
+ LSI MegaRAID SATA 150-6 1000:1960:1000:0523
+ LSI MegaRAID SATA 300-4X 1000:0409:1000:3004
+ LSI MegaRAID SATA 300-8X 1000:0409:1000:3008
+ INTEL RAID Controller SRCU42X 1000:0407:8086:0532
+ INTEL RAID Controller SRCS16 1000:1960:8086:0523
+ INTEL RAID Controller SRCU42E 1000:0408:8086:0002
+ INTEL RAID Controller SRCZCRX 1000:0407:8086:0530
+ INTEL RAID Controller SRCS28X 1000:0409:8086:3008
+ INTEL RAID Controller SROMBU42E 1000:0408:8086:3431
+ INTEL RAID Controller SROMBU42E 1000:0408:8086:3499
+ INTEL RAID Controller SRCU51L 1000:1960:8086:0520
+ FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065
+ ACER MegaRAID ROMB-2E 1000:0408:1025:004D
+ NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287
+
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid_mbox
+
+config MEGARAID_LEGACY
+ tristate "LSI Logic Legacy MegaRAID Driver"
+ depends on PCI && SCSI
+ help
+ This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490
+ and 467 SCSI host adapters. This driver also supports all U320
+ RAID controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called megaraid
+
+config MEGARAID_SAS
+ tristate "LSI Logic MegaRAID SAS RAID Module"
+ depends on PCI && SCSI
+ help
+ Module for LSI Logic's SAS based RAID controllers.
+ To compile this driver as a module, choose 'm' here.
+ Module will be called megaraid_sas
+
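As a usage sketch (not part of the patch itself), a .config fragment that
enables the whole new-generation stack above, built as modules, would read:

	CONFIG_MEGARAID_NEWGEN=y
	CONFIG_MEGARAID_MM=m
	CONFIG_MEGARAID_MAILBOX=m
	CONFIG_MEGARAID_SAS=m

MEGARAID_NEWGEN is a plain bool, so it is either y or n; the tristate options
may be m, and the depends clauses above mean MEGARAID_MAILBOX is only
selectable once MEGARAID_MM is enabled.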
diff --git a/kernel/drivers/scsi/megaraid/Makefile b/kernel/drivers/scsi/megaraid/Makefile
new file mode 100644
index 000000000..5826ed509
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
+obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
+obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
+megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
+ megaraid_sas_fp.o
diff --git a/kernel/drivers/scsi/megaraid/mbox_defs.h b/kernel/drivers/scsi/megaraid/mbox_defs.h
new file mode 100644
index 000000000..e01c6f7c2
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/mbox_defs.h
@@ -0,0 +1,790 @@
+/*
+ *
+ * Linux MegaRAID Unified device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : mbox_defs.h
+ *
+ */
+#ifndef _MRAID_MBOX_DEFS_H_
+#define _MRAID_MBOX_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Commands and states for mailbox based controllers
+ */
+
+#define MBOXCMD_LREAD 0x01
+#define MBOXCMD_LWRITE 0x02
+#define MBOXCMD_PASSTHRU 0x03
+#define MBOXCMD_ADPEXTINQ 0x04
+#define MBOXCMD_ADAPTERINQ 0x05
+#define MBOXCMD_LREAD64 0xA7
+#define MBOXCMD_LWRITE64 0xA8
+#define MBOXCMD_PASSTHRU64 0xC3
+#define MBOXCMD_EXTPTHRU 0xE3
+
+#define MAIN_MISC_OPCODE 0xA4
+#define GET_MAX_SG_SUPPORT 0x01
+#define SUPPORT_EXT_CDB 0x16
+
+#define FC_NEW_CONFIG 0xA1
+#define NC_SUBOP_PRODUCT_INFO 0x0E
+#define NC_SUBOP_ENQUIRY3 0x0F
+#define ENQ3_GET_SOLICITED_FULL 0x02
+#define OP_DCMD_READ_CONFIG 0x04
+#define NEW_READ_CONFIG_8LD 0x67
+#define READ_CONFIG_8LD 0x07
+#define FLUSH_ADAPTER 0x0A
+#define FLUSH_SYSTEM 0xFE
+
+/*
+ * Command for random deletion of logical drives
+ */
+#define FC_DEL_LOGDRV 0xA4
+#define OP_SUP_DEL_LOGDRV 0x2A
+#define OP_GET_LDID_MAP 0x18
+#define OP_DEL_LOGDRV 0x1C
+
+/*
+ * BIOS commands
+ */
+#define IS_BIOS_ENABLED 0x62
+#define GET_BIOS 0x01
+#define CHNL_CLASS 0xA9
+#define GET_CHNL_CLASS 0x00
+#define SET_CHNL_CLASS 0x01
+#define CH_RAID 0x01
+#define CH_SCSI 0x00
+#define BIOS_PVT_DATA 0x40
+#define GET_BIOS_PVT_DATA 0x00
+
+
+/*
+ * Commands to support clustering
+ */
+#define GET_TARGET_ID 0x7D
+#define CLUSTER_OP 0x70
+#define GET_CLUSTER_MODE 0x02
+#define CLUSTER_CMD 0x6E
+#define RESERVE_LD 0x01
+#define RELEASE_LD 0x02
+#define RESET_RESERVATIONS 0x03
+#define RESERVATION_STATUS 0x04
+#define RESERVE_PD 0x05
+#define RELEASE_PD 0x06
+
+
+/*
+ * Module battery status
+ */
+#define BATTERY_MODULE_MISSING 0x01
+#define BATTERY_LOW_VOLTAGE 0x02
+#define BATTERY_TEMP_HIGH 0x04
+#define BATTERY_PACK_MISSING 0x08
+#define BATTERY_CHARGE_MASK 0x30
+#define BATTERY_CHARGE_DONE 0x00
+#define BATTERY_CHARGE_INPROG 0x10
+#define BATTERY_CHARGE_FAIL 0x20
+#define BATTERY_CYCLES_EXCEEDED 0x40
+
+/*
+ * Physical drive states.
+ */
+#define PDRV_UNCNF 0
+#define PDRV_ONLINE 3
+#define PDRV_FAILED 4
+#define PDRV_RBLD 5
+#define PDRV_HOTSPARE 6
+
+
+/*
+ * Raid logical drive states.
+ */
+#define RDRV_OFFLINE 0
+#define RDRV_DEGRADED 1
+#define RDRV_OPTIMAL 2
+#define RDRV_DELETED 3
+
+/*
+ * Read, write and cache policies
+ */
+#define NO_READ_AHEAD 0
+#define READ_AHEAD 1
+#define ADAP_READ_AHEAD 2
+#define WRMODE_WRITE_THRU 0
+#define WRMODE_WRITE_BACK 1
+#define CACHED_IO 0
+#define DIRECT_IO 1
+
+#define MAX_LOGICAL_DRIVES_8LD 8
+#define MAX_LOGICAL_DRIVES_40LD 40
+#define FC_MAX_PHYSICAL_DEVICES 256
+#define MAX_MBOX_CHANNELS 5
+#define MAX_MBOX_TARGET 15
+#define MBOX_MAX_PHYSICAL_DRIVES (MAX_MBOX_CHANNELS * MAX_MBOX_TARGET)
+#define MAX_ROW_SIZE_40LD 32
+#define MAX_ROW_SIZE_8LD 8
+#define SPAN_DEPTH_8_SPANS 8
+#define SPAN_DEPTH_4_SPANS 4
+#define MAX_REQ_SENSE_LEN 0x20
+
+
+
+/**
+ * struct mbox_t - Driver and f/w handshake structure.
+ * @cmd : firmware command
+ * @cmdid : command id
+ * @numsectors : number of sectors to be transferred
+ * @lba : Logical Block Address on LD
+ * @xferaddr : DMA address for data transfer
+ * @logdrv : logical drive number
+ * @numsge : number of scatter gather elements in sg list
+ * @resvd : reserved
+ * @busy : f/w busy, must wait to issue more commands.
+ * @numstatus : number of commands completed.
+ * @status : status of the commands completed
+ * @completed : array of completed command ids.
+ * @poll : poll and ack sequence
+ * @ack : poll and ack sequence
+ *
+ * The central handshake structure between the driver and the firmware. This
+ * structure must be allocated by the driver and aligned at 8-byte boundary.
+ */
+#define MBOX_MAX_FIRMWARE_STATUS 46
+typedef struct {
+ uint8_t cmd;
+ uint8_t cmdid;
+ uint16_t numsectors;
+ uint32_t lba;
+ uint32_t xferaddr;
+ uint8_t logdrv;
+ uint8_t numsge;
+ uint8_t resvd;
+ uint8_t busy;
+ uint8_t numstatus;
+ uint8_t status;
+ uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
+ uint8_t poll;
+ uint8_t ack;
+} __attribute__ ((packed)) mbox_t;
+
+
+/**
+ * mbox64_t - 64-bit extension for the mailbox
+ * @xferaddr_lo : the low 32-bits of the address of the scatter-gather list
+ * @xferaddr_hi : the upper 32-bits of the address of the scatter-gather list
+ * @mbox32 : 32-bit mailbox, whose xferaddr field must be set to
+ * 0xFFFFFFFF
+ *
+ * This is the extension of the 32-bit mailbox to be able to perform DMA
+ * beyond 4GB address range.
+ */
+typedef struct {
+ uint32_t xferaddr_lo;
+ uint32_t xferaddr_hi;
+ mbox_t mbox32;
+} __attribute__ ((packed)) mbox64_t;
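Per the kernel-doc above, the mailbox must live in driver-allocated memory on
an 8-byte boundary, and transfers above the 4GB line go through mbox64_t with
the embedded 32-bit xferaddr pinned to 0xFFFFFFFF. A minimal allocation sketch
under those rules (the helper name, pdev, and sgl_dma_h are assumptions for
illustration, not code from this patch):

	static mbox64_t *alloc_mbox64(struct pci_dev *pdev, dma_addr_t sgl_dma_h,
				      dma_addr_t *mbox_dma_h)
	{
		mbox64_t *mbox64;

		/* dma_alloc_coherent() returns memory aligned well beyond the
		 * 8-byte rule the kernel-doc imposes on the mailbox */
		mbox64 = dma_alloc_coherent(&pdev->dev, sizeof(mbox64_t),
					    mbox_dma_h, GFP_KERNEL);
		if (!mbox64)
			return NULL;

		/* DMA above 4GB: pin the 32-bit field, use the 64-bit pair */
		mbox64->mbox32.xferaddr = 0xFFFFFFFF;
		mbox64->xferaddr_lo = lower_32_bits(sgl_dma_h);
		mbox64->xferaddr_hi = upper_32_bits(sgl_dma_h);

		return mbox64;
	}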
+
+/*
+ * mailbox structure used for internal commands
+ */
+typedef struct {
+ u8 cmd;
+ u8 cmdid;
+ u8 opcode;
+ u8 subopcode;
+ u32 lba;
+ u32 xferaddr;
+ u8 logdrv;
+ u8 rsvd[3];
+ u8 numstatus;
+ u8 status;
+} __attribute__ ((packed)) int_mbox_t;
+
+/**
+ * mraid_passthru_t - passthru structure to issue commands to physical devices
+ * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
+ * @ars : set if ARS required after check condition
+ * @islogical : set if command meant for logical devices
+ * @logdrv : logical drive number if command for LD
+ * @channel : Channel on which physical device is located
+ * @target : SCSI target of the device
+ * @queuetag : unused
+ * @queueaction : unused
+ * @cdb : SCSI CDB
+ * @cdblen : length of the CDB
+ * @reqsenselen : amount of request sense data to be returned
+ * @reqsensearea : Sense information buffer
+ * @numsge : number of scatter-gather elements in the sg list
+ * @scsistatus : SCSI status of the command completed.
+ * @dataxferaddr : DMA data transfer address
+ * @dataxferlen : amount of the data to be transferred.
+ */
+typedef struct {
+ uint8_t timeout :3;
+ uint8_t ars :1;
+ uint8_t reserved :3;
+ uint8_t islogical :1;
+ uint8_t logdrv;
+ uint8_t channel;
+ uint8_t target;
+ uint8_t queuetag;
+ uint8_t queueaction;
+ uint8_t cdb[10];
+ uint8_t cdblen;
+ uint8_t reqsenselen;
+ uint8_t reqsensearea[MAX_REQ_SENSE_LEN];
+ uint8_t numsge;
+ uint8_t scsistatus;
+ uint32_t dataxferaddr;
+ uint32_t dataxferlen;
+} __attribute__ ((packed)) mraid_passthru_t;
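To make the field encodings above concrete, here is a sketch (not from this
patch) of filling a passthru for a 6-byte INQUIRY to a physical device at
channel 0, target 4; buf_dma stands for a 96-byte DMA buffer mapped elsewhere:

	static void setup_inquiry_pthru(mraid_passthru_t *pthru, dma_addr_t buf_dma)
	{
		memset(pthru, 0, sizeof(*pthru));
		pthru->timeout	    = 1;	/* encoded: 0=6s, 1=60s, 2=10min, 3=3hr */
		pthru->ars	    = 1;	/* auto request sense on check condition */
		pthru->islogical    = 0;	/* physical device, not a logical drive */
		pthru->channel	    = 0;
		pthru->target	    = 4;
		pthru->cdb[0]	    = 0x12;	/* SCSI INQUIRY opcode */
		pthru->cdb[4]	    = 96;	/* allocation length */
		pthru->cdblen	    = 6;
		pthru->reqsenselen  = MAX_REQ_SENSE_LEN;
		pthru->dataxferaddr = buf_dma;
		pthru->dataxferlen  = 96;
	}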
+
+typedef struct {
+
+ uint32_t dataxferaddr_lo;
+ uint32_t dataxferaddr_hi;
+ mraid_passthru_t pthru32;
+
+} __attribute__ ((packed)) mega_passthru64_t;
+
+/**
+ * mraid_epassthru_t - passthru structure to issue commands to physical devices
+ * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
+ * @ars : set if ARS required after check condition
+ * @rsvd1 : reserved field
+ * @cd_rom : (?)
+ * @rsvd2 : reserved field
+ * @islogical : set if command meant for logical devices
+ * @logdrv : logical drive number if command for LD
+ * @channel : Channel on which physical device is located
+ * @target : SCSI target of the device
+ * @queuetag : unused
+ * @queueaction : unused
+ * @cdblen : length of the CDB
+ * @rsvd3 : reserved field
+ * @cdb : SCSI CDB
+ * @numsge : number of scatter-gather elements in the sg list
+ * @status : SCSI status of the command completed.
+ * @reqsenselen : amount of request sense data to be returned
+ * @reqsensearea : Sense information buffer
+ * @rsvd4 : reserved field
+ * @dataxferaddr : DMA data transfer address
+ * @dataxferlen : amount of the data to be transferred.
+ */
+typedef struct {
+ uint8_t timeout :3;
+ uint8_t ars :1;
+ uint8_t rsvd1 :1;
+ uint8_t cd_rom :1;
+ uint8_t rsvd2 :1;
+ uint8_t islogical :1;
+ uint8_t logdrv;
+ uint8_t channel;
+ uint8_t target;
+ uint8_t queuetag;
+ uint8_t queueaction;
+ uint8_t cdblen;
+ uint8_t rsvd3;
+ uint8_t cdb[16];
+ uint8_t numsge;
+ uint8_t status;
+ uint8_t reqsenselen;
+ uint8_t reqsensearea[MAX_REQ_SENSE_LEN];
+ uint8_t rsvd4;
+ uint32_t dataxferaddr;
+ uint32_t dataxferlen;
+} __attribute__ ((packed)) mraid_epassthru_t;
+
+
+/**
+ * mraid_pinfo_t - product info, static information about the controller
+ * @data_size : current size in bytes (not including resvd)
+ * @config_signature : Current value is 0x00282008
+ * @fw_version : Firmware version
+ * @bios_version : version of the BIOS
+ * @product_name : Name given to the controller
+ * @max_commands : Maximum concurrent commands supported
+ * @nchannels : Number of SCSI Channels detected
+ * @fc_loop_present : Number of Fibre Loops detected
+ * @mem_type : EDO, FPM, SDRAM etc
+ * @signature :
+ * @dram_size : In terms of MB
+ * @subsysid : device PCI subsystem ID
+ * @subsysvid : device PCI subsystem vendor ID
+ * @notify_counters :
+ * @pad1k : 135 + 889 resvd = 1024 total size
+ *
+ * This structure holds the information about the controller which is not
+ * expected to change dynamically.
+ *
+ * The current value of config signature is 0x00282008:
+ * 0x28 = MAX_LOGICAL_DRIVES,
+ * 0x20 = Number of stripes and
+ * 0x08 = Number of spans
+ */
+typedef struct {
+ uint32_t data_size;
+ uint32_t config_signature;
+ uint8_t fw_version[16];
+ uint8_t bios_version[16];
+ uint8_t product_name[80];
+ uint8_t max_commands;
+ uint8_t nchannels;
+ uint8_t fc_loop_present;
+ uint8_t mem_type;
+ uint32_t signature;
+ uint16_t dram_size;
+ uint16_t subsysid;
+ uint16_t subsysvid;
+ uint8_t notify_counters;
+ uint8_t pad1k[889];
+} __attribute__ ((packed)) mraid_pinfo_t;
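A worked decode of the config signature documented above (a sketch; pinfo is
assumed to point at a filled-in mraid_pinfo_t):

	uint32_t sig = pinfo->config_signature;		/* 0x00282008 */

	uint8_t max_ldrv = (sig >> 16) & 0xFF;	/* 0x28 = 40 logical drives */
	uint8_t stripes  = (sig >>  8) & 0xFF;	/* 0x20 = 32 stripes */
	uint8_t spans    =  sig        & 0xFF;	/* 0x08 = 8 spans */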
+
+
+/**
+ * mraid_notify_t - the notification structure
+ * @global_counter : Any change increments this counter
+ * @param_counter : Indicates any params changed
+ * @param_id : Param modified - defined below
+ * @param_val : New val of last param modified
+ * @write_config_counter : write config occurred
+ * @write_config_rsvd :
+ * @ldrv_op_counter : Indicates ldrv op started/completed
+ * @ldrv_opid : ldrv num
+ * @ldrv_opcmd : ldrv operation - defined below
+ * @ldrv_opstatus : status of the operation
+ * @ldrv_state_counter : Indicates change of ldrv state
+ * @ldrv_state_id : ldrv num
+ * @ldrv_state_new : New state
+ * @ldrv_state_old : old state
+ * @pdrv_state_counter : Indicates change of pdrv state
+ * @pdrv_state_id : pdrv id
+ * @pdrv_state_new : New state
+ * @pdrv_state_old : old state
+ * @pdrv_fmt_counter : Indicates pdrv format started/over
+ * @pdrv_fmt_id : pdrv id
+ * @pdrv_fmt_val : format started/over
+ * @pdrv_fmt_rsvd :
+ * @targ_xfer_counter : Indicates SCSI-2 Xfer rate change
+ * @targ_xfer_id : pdrv Id
+ * @targ_xfer_val : new Xfer params of last pdrv
+ * @targ_xfer_rsvd :
+ * @fcloop_id_chg_counter : Indicates loopid changed
+ * @fcloopid_pdrvid : pdrv id
+ * @fcloop_id0 : loopid on fc loop 0
+ * @fcloop_id1 : loopid on fc loop 1
+ * @fcloop_state_counter : Indicates loop state changed
+ * @fcloop_state0 : state of fc loop 0
+ * @fcloop_state1 : state of fc loop 1
+ * @fcloop_state_rsvd :
+ */
+typedef struct {
+ uint32_t global_counter;
+ uint8_t param_counter;
+ uint8_t param_id;
+ uint16_t param_val;
+ uint8_t write_config_counter;
+ uint8_t write_config_rsvd[3];
+ uint8_t ldrv_op_counter;
+ uint8_t ldrv_opid;
+ uint8_t ldrv_opcmd;
+ uint8_t ldrv_opstatus;
+ uint8_t ldrv_state_counter;
+ uint8_t ldrv_state_id;
+ uint8_t ldrv_state_new;
+ uint8_t ldrv_state_old;
+ uint8_t pdrv_state_counter;
+ uint8_t pdrv_state_id;
+ uint8_t pdrv_state_new;
+ uint8_t pdrv_state_old;
+ uint8_t pdrv_fmt_counter;
+ uint8_t pdrv_fmt_id;
+ uint8_t pdrv_fmt_val;
+ uint8_t pdrv_fmt_rsvd;
+ uint8_t targ_xfer_counter;
+ uint8_t targ_xfer_id;
+ uint8_t targ_xfer_val;
+ uint8_t targ_xfer_rsvd;
+ uint8_t fcloop_id_chg_counter;
+ uint8_t fcloopid_pdrvid;
+ uint8_t fcloop_id0;
+ uint8_t fcloop_id1;
+ uint8_t fcloop_state_counter;
+ uint8_t fcloop_state0;
+ uint8_t fcloop_state1;
+ uint8_t fcloop_state_rsvd;
+} __attribute__ ((packed)) mraid_notify_t;
+
+
+/**
+ * mraid_inquiry3_t - enquiry for device information
+ *
+ * @data_size : current size in bytes (not including resvd)
+ * @notify :
+ * @notify_rsvd :
+ * @rebuild_rate : rebuild rate (0% - 100%)
+ * @cache_flush_int : cache flush interval in seconds
+ * @sense_alert :
+ * @drive_insert_count : drive insertion count
+ * @battery_status :
+ * @num_ldrv : no. of Log Drives configured
+ * @recon_state : state of reconstruct
+ * @ldrv_op_status : logdrv Status
+ * @ldrv_size : size of each log drv
+ * @ldrv_prop :
+ * @ldrv_state : state of log drives
+ * @pdrv_state : state of phys drvs.
+ * @pdrv_format :
+ * @targ_xfer : phys device transfer rate
+ * @pad1k : 761 + 263 reserved = 1024 bytes total size
+ */
+#define MAX_NOTIFY_SIZE 0x80
+#define CUR_NOTIFY_SIZE sizeof(mraid_notify_t)
+
+typedef struct {
+ uint32_t data_size;
+
+ mraid_notify_t notify;
+
+ uint8_t notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE];
+
+ uint8_t rebuild_rate;
+ uint8_t cache_flush_int;
+ uint8_t sense_alert;
+ uint8_t drive_insert_count;
+
+ uint8_t battery_status;
+ uint8_t num_ldrv;
+ uint8_t recon_state[MAX_LOGICAL_DRIVES_40LD / 8];
+ uint16_t ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8];
+
+ uint32_t ldrv_size[MAX_LOGICAL_DRIVES_40LD];
+ uint8_t ldrv_prop[MAX_LOGICAL_DRIVES_40LD];
+ uint8_t ldrv_state[MAX_LOGICAL_DRIVES_40LD];
+ uint8_t pdrv_state[FC_MAX_PHYSICAL_DEVICES];
+ uint16_t pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16];
+
+ uint8_t targ_xfer[80];
+ uint8_t pad1k[263];
+} __attribute__ ((packed)) mraid_inquiry3_t;
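The notify_rsvd padding above keeps the inquiry3 layout stable while
mraid_notify_t grows; a compile-time guard (an illustrative addition, not in
the patch) would catch the day the notify block outgrows its 0x80-byte budget:

	#include <linux/bug.h>

	static inline void mraid_notify_size_check(void)
	{
		/* build fails if mraid_notify_t ever exceeds MAX_NOTIFY_SIZE */
		BUILD_BUG_ON(CUR_NOTIFY_SIZE > MAX_NOTIFY_SIZE);
	}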
+
+
+/**
+ * mraid_adapinfo_t - information about the adapter
+ * @max_commands : max concurrent commands supported
+ * @rebuild_rate : rebuild rate - 0% thru 100%
+ * @max_targ_per_chan : max targ per channel
+ * @nchannels : number of channels on HBA
+ * @fw_version : firmware version
+ * @age_of_flash : number of times FW has been flashed
+ * @chip_set_value : contents of 0xC0000832
+ * @dram_size : in MB
+ * @cache_flush_interval : in seconds
+ * @bios_version :
+ * @board_type :
+ * @sense_alert :
+ * @write_config_count : increase with every configuration change
+ * @drive_inserted_count : increase with every drive inserted
+ * @inserted_drive : channel:Id of inserted drive
+ * @battery_status : bit 0: battery module missing
+ * bit 1: VBAD
+ * bit 2: temperature high
+ * bit 3: battery pack missing
+ * bit 4,5:
+ * 00 - charge complete
+ * 01 - fast charge in progress
+ * 10 - fast charge fail
+ * 11 - undefined
+ * bit 6: counter > 1000
+ * bit 7: Undefined
+ * @dec_fault_bus_info :
+ */
+typedef struct {
+ uint8_t max_commands;
+ uint8_t rebuild_rate;
+ uint8_t max_targ_per_chan;
+ uint8_t nchannels;
+ uint8_t fw_version[4];
+ uint16_t age_of_flash;
+ uint8_t chip_set_value;
+ uint8_t dram_size;
+ uint8_t cache_flush_interval;
+ uint8_t bios_version[4];
+ uint8_t board_type;
+ uint8_t sense_alert;
+ uint8_t write_config_count;
+ uint8_t battery_status;
+ uint8_t dec_fault_bus_info;
+} __attribute__ ((packed)) mraid_adapinfo_t;
+
+
+/**
+ * mraid_ldrv_info_t - information about the logical drives
+ * @nldrv : Number of logical drives configured
+ * @rsvd :
+ * @size : size of each logical drive
+ * @prop :
+ * @state : state of each logical drive
+ */
+typedef struct {
+ uint8_t nldrv;
+ uint8_t rsvd[3];
+ uint32_t size[MAX_LOGICAL_DRIVES_8LD];
+ uint8_t prop[MAX_LOGICAL_DRIVES_8LD];
+ uint8_t state[MAX_LOGICAL_DRIVES_8LD];
+} __attribute__ ((packed)) mraid_ldrv_info_t;
+
+
+/**
+ * mraid_pdrv_info_t - information about the physical drives
+ * @pdrv_state : state of each physical drive
+ */
+typedef struct {
+ uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
+ uint8_t rsvd;
+} __attribute__ ((packed)) mraid_pdrv_info_t;
+
+
+/**
+ * mraid_inquiry_t - RAID inquiry, mailbox command 0x05
+ * @mraid_adapinfo_t : adapter information
+ * @mraid_ldrv_info_t : logical drives information
+ * @mraid_pdrv_info_t : physical drives information
+ */
+typedef struct {
+ mraid_adapinfo_t adapter_info;
+ mraid_ldrv_info_t logdrv_info;
+ mraid_pdrv_info_t pdrv_info;
+} __attribute__ ((packed)) mraid_inquiry_t;
+
+
+/**
+ * mraid_extinq_t - RAID extended inquiry, mailbox command 0x04
+ *
+ * @raid_inq : raid inquiry
+ * @phys_drv_format :
+ * @stack_attn :
+ * @modem_status :
+ * @rsvd :
+ */
+typedef struct {
+ mraid_inquiry_t raid_inq;
+ uint16_t phys_drv_format[MAX_MBOX_CHANNELS];
+ uint8_t stack_attn;
+ uint8_t modem_status;
+ uint8_t rsvd[2];
+} __attribute__ ((packed)) mraid_extinq_t;
+
+
+/**
+ * adap_device_t - device information
+ * @channel : channel for the device
+ * @target : target ID of the device
+ */
+typedef struct {
+ uint8_t channel;
+ uint8_t target;
+}__attribute__ ((packed)) adap_device_t;
+
+
+/**
+ * adap_span_40ld_t - 40LD span
+ * @start_blk : starting block
+ * @num_blks : number of blocks
+ */
+typedef struct {
+ uint32_t start_blk;
+ uint32_t num_blks;
+ adap_device_t device[MAX_ROW_SIZE_40LD];
+}__attribute__ ((packed)) adap_span_40ld_t;
+
+
+/**
+ * adap_span_8ld_t - 8LD span
+ * @start_blk : starting block
+ * @num_blks : number of blocks
+ */
+typedef struct {
+ uint32_t start_blk;
+ uint32_t num_blks;
+ adap_device_t device[MAX_ROW_SIZE_8LD];
+}__attribute__ ((packed)) adap_span_8ld_t;
+
+
+/**
+ * logdrv_param_t - logical drives parameters
+ *
+ * @span_depth : total number of spans
+ * @level : RAID level
+ * @read_ahead : read ahead, no read ahead, adaptive read ahead
+ * @stripe_sz : encoded stripe size
+ * @status : status of the logical drive
+ * @write_mode : write mode, write_through/write_back
+ * @direct_io : direct io or through cache
+ * @row_size : number of stripes in a row
+ */
+typedef struct {
+ uint8_t span_depth;
+ uint8_t level;
+ uint8_t read_ahead;
+ uint8_t stripe_sz;
+ uint8_t status;
+ uint8_t write_mode;
+ uint8_t direct_io;
+ uint8_t row_size;
+} __attribute__ ((packed)) logdrv_param_t;
+
+
+/**
+ * logdrv_40ld_t - logical drive definition for 40LD controllers
+ * @lparam : logical drives parameters
+ * @span : span
+ */
+typedef struct {
+ logdrv_param_t lparam;
+ adap_span_40ld_t span[SPAN_DEPTH_8_SPANS];
+}__attribute__ ((packed)) logdrv_40ld_t;
+
+
+/**
+ * logdrv_8ld_span8_t - logical drive definition for 8LD controllers
+ * @lparam : logical drives parameters
+ * @span : span
+ *
+ * 8-LD logical drive with up to 8 spans
+ */
+typedef struct {
+ logdrv_param_t lparam;
+ adap_span_8ld_t span[SPAN_DEPTH_8_SPANS];
+}__attribute__ ((packed)) logdrv_8ld_span8_t;
+
+
+/**
+ * logdrv_8ld_span4_t - logical drive definition for 8LD controllers
+ * @lparam : logical drives parameters
+ * @span : span
+ *
+ * 8-LD logical drive with up to 4 spans
+ */
+typedef struct {
+ logdrv_param_t lparam;
+ adap_span_8ld_t span[SPAN_DEPTH_4_SPANS];
+}__attribute__ ((packed)) logdrv_8ld_span4_t;
+
+
+/**
+ * phys_drive_t - physical device information
+ * @type : Type of the device
+ * @cur_status : current status of the device
+ * @tag_depth : Level of tagging
+ * @sync_neg : sync negotiation - ENABLE or DISABLE
+ * @size : configurable size in terms of 512 byte
+ */
+typedef struct {
+ uint8_t type;
+ uint8_t cur_status;
+ uint8_t tag_depth;
+ uint8_t sync_neg;
+ uint32_t size;
+}__attribute__ ((packed)) phys_drive_t;
+
+
+/**
+ * disk_array_40ld_t - disk array for 40LD controllers
+ * @numldrv : number of logical drives
+ * @resvd :
+ * @ldrv : logical drives information
+ * @pdrv : physical drives information
+ */
+typedef struct {
+ uint8_t numldrv;
+ uint8_t resvd[3];
+ logdrv_40ld_t ldrv[MAX_LOGICAL_DRIVES_40LD];
+ phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_40ld_t;
+
+
+/**
+ * disk_array_8ld_span8_t - disk array for 8LD controllers
+ * @numldrv : number of logical drives
+ * @resvd :
+ * @ldrv : logical drives information
+ * @pdrv : physical drives information
+ *
+ * Disk array for 8LD logical drives with up to 8 spans
+ */
+typedef struct {
+ uint8_t numldrv;
+ uint8_t resvd[3];
+ logdrv_8ld_span8_t ldrv[MAX_LOGICAL_DRIVES_8LD];
+ phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld_span8_t;
+
+
+/**
+ * disk_array_8ld_span4_t - disk array for 8LD controllers
+ * @numldrv : number of logical drives
+ * @resvd :
+ * @ldrv : logical drives information
+ * @pdrv : physical drives information
+ *
+ * Disk array for 8LD logical drives with up to 4 spans
+ */
+typedef struct {
+ uint8_t numldrv;
+ uint8_t resvd[3];
+ logdrv_8ld_span4_t ldrv[MAX_LOGICAL_DRIVES_8LD];
+ phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld_span4_t;
+
+
+/**
+ * struct private_bios_data - bios private data for boot devices
+ * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB,
+ * 0x1000 - 8GB, Others values are invalid
+ * @unused : bits 4-7 are unused
+ * @boot_drv : logical drive set as boot drive, 0..7 - for 8LD cards,
+ * 0..39 - for 40LD cards
+ * @cksum : 0-(sum of first 13 bytes of this structure)
+ */
+struct private_bios_data {
+ uint8_t geometry :4;
+ uint8_t unused :4;
+ uint8_t boot_drv;
+ uint8_t rsvd[12];
+ uint16_t cksum;
+} __attribute__ ((packed));
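A sketch of the checksum convention stated in the kernel-doc above (cksum is
zero minus the sum of the structure's first 13 bytes; the helper name is
illustrative):

	static uint16_t pbd_compute_cksum(const struct private_bios_data *pbd)
	{
		const uint8_t *p = (const uint8_t *)pbd;
		uint16_t sum = 0;
		int i;

		for (i = 0; i < 13; i++)
			sum += p[i];

		return (uint16_t)(0 - sum);
	}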
+
+
+/**
+ * mbox_sgl64 - 64-bit scatter list for mailbox based controllers
+ * @address : address of the buffer
+ * @length : data transfer length
+ */
+typedef struct {
+ uint64_t address;
+ uint32_t length;
+} __attribute__ ((packed)) mbox_sgl64;
+
+/**
+ * mbox_sgl32 - 32-bit scatter list for mailbox based controllers
+ * @address : address of the buffer
+ * @length : data transfer length
+ */
+typedef struct {
+ uint32_t address;
+ uint32_t length;
+} __attribute__ ((packed)) mbox_sgl32;
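As a sketch of how these scatter lists get populated from a mid-layer command
(an illustrative helper; scsi_dma_map() and the sg accessors are standard
SCSI mid-layer calls):

	static int fill_sgl64(struct scsi_cmnd *scp, mbox_sgl64 *sgl)
	{
		struct scatterlist *sg;
		int i, nseg;

		nseg = scsi_dma_map(scp);	/* map the data buffer for DMA */
		if (nseg < 0)
			return nseg;

		scsi_for_each_sg(scp, sg, nseg, i) {
			sgl[i].address = sg_dma_address(sg);
			sgl[i].length  = sg_dma_len(sg);
		}

		return nseg;	/* becomes numsge in the mailbox */
	}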
+
+#endif // _MRAID_MBOX_DEFS_H_
+
+/* vim: set ts=8 sw=8 tw=78: */
diff --git a/kernel/drivers/scsi/megaraid/mega_common.h b/kernel/drivers/scsi/megaraid/mega_common.h
new file mode 100644
index 000000000..1d037ed52
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/mega_common.h
@@ -0,0 +1,290 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : mega_common.h
+ *
+ * Library of common routines used by all low-level megaraid drivers
+ */
+
+#ifndef _MEGA_COMMON_H_
+#define _MEGA_COMMON_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+
+#define LSI_MAX_CHANNELS 16
+#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
+
+#define HBA_SIGNATURE_64_BIT 0x299
+#define PCI_CONF_AMISIG64 0xa4
+
+#define MEGA_SCSI_INQ_EVPD 1
+#define MEGA_INVALID_FIELD_IN_CDB 0x24
+
+
+/**
+ * scb_t - scsi command control block
+ * @ccb : command control block for individual driver
+ * @list : list of control blocks
+ * @gp : general purpose field for LLDs
+ * @sno : all SCBs have a serial number
+ * @scp : associated scsi command
+ * @state : current state of scb
+ * @dma_direction : direction of data transfer
+ * @dma_type : transfer with sg list, buffer, or no data transfer
+ * @dev_channel : actual channel on the device
+ * @dev_target : actual target on the device
+ * @status : completion status
+ *
+ * This is our central data structure to issue commands to each driver.
+ * Driver specific data structures are maintained in the ccb field.
+ * scb provides a field 'gp', which can be used by LLD for its own purposes
+ *
+ * dev_channel and dev_target must be initialized with the actual channel and
+ * target on the controller.
+ */
+typedef struct {
+ caddr_t ccb;
+ struct list_head list;
+ unsigned long gp;
+ unsigned int sno;
+ struct scsi_cmnd *scp;
+ uint32_t state;
+ uint32_t dma_direction;
+ uint32_t dma_type;
+ uint16_t dev_channel;
+ uint16_t dev_target;
+ uint32_t status;
+} scb_t;
+
+/*
+ * SCB states as it transitions from one state to another
+ */
+#define SCB_FREE 0x0000 /* on the free list */
+#define SCB_ACTIVE 0x0001 /* off the free list */
+#define SCB_PENDQ 0x0002 /* on the pending queue */
+#define SCB_ISSUED 0x0004 /* issued - owner f/w */
+#define SCB_ABORT 0x0008 /* Got an abort for this one */
+#define SCB_RESET 0x0010 /* Got a reset for this one */
+
+/*
+ * DMA types for scb
+ */
+#define MRAID_DMA_NONE 0x0000 /* no data transfer for this command */
+#define MRAID_DMA_WSG 0x0001 /* data transfer using a sg list */
+#define MRAID_DMA_WBUF 0x0002 /* data transfer using a contiguous buffer */
+
+
+/**
+ * struct adapter_t - driver's initialization structure
+ * @dpc_h : tasklet handle
+ * @pdev : pci configuration pointer for kernel
+ * @host : pointer to host structure of mid-layer
+ * @lock : synchronization lock for mid-layer and driver
+ * @quiescent : driver is quiescent for now.
+ * @outstanding_cmds : number of commands pending in the driver
+ * @kscb_list : pointer to the bulk of SCBs pointers for IO
+ * @kscb_pool : pool of free scbs for IO
+ * @kscb_pool_lock : lock for pool of free scbs
+ * @pend_list : pending commands list
+ * @pend_list_lock : exclusion lock for pending commands list
+ * @completed_list : list of completed commands
+ * @completed_list_lock : exclusion lock for list of completed commands
+ * @sglen : max sg elements supported
+ * @device_ids : to convert kernel device addr to our devices.
+ * @raid_device : raid adapter specific pointer
+ * @max_channel : maximum channel number supported - inclusive
+ * @max_target : max target supported - inclusive
+ * @max_lun : max lun supported - inclusive
+ * @unique_id : unique identifier for each adapter
+ * @irq : IRQ for this adapter
+ * @ito : internal timeout value, (-1) means no timeout
+ * @ibuf : buffer to issue internal commands
+ * @ibuf_dma_h : dma handle for the above buffer
+ * @uscb_list : SCB pointers for user cmds, common mgmt module
+ * @uscb_pool : pool of SCBs for user commands
+ * @uscb_pool_lock : exclusion lock for these SCBs
+ * @max_cmds : max outstanding commands
+ * @fw_version : firmware version
+ * @bios_version : bios version
+ * @max_cdb_sz : biggest CDB size supported.
+ * @ha : is high availability present - clustering
+ * @init_id : initiator ID, the default value should be 7
+ * @max_sectors : max sectors per request
+ * @cmd_per_lun : max outstanding commands per LUN
+ * @being_detached : set when unloading, no more mgmt calls
+ *
+ *
+ * mraid_setup_device_map() can be called anytime after the device map is
+ * available and MRAID_GET_DEVICE_MAP() can be called whenever the mapping is
+ * required, usually from LLD's queue entry point. The former API sets up the
+ * device map, while MRAID_IS_LOGICAL(adapter_t *, struct scsi_cmnd *) can be
+ * used to find out if the device in question is a logical drive.
+ *
+ * quiescent flag should be set by the driver if it is not accepting more
+ * commands
+ *
+ * NOTE: The fields of this structure are placed to minimize cache misses
+ */
+
+// amount of space required to store the bios and firmware version strings
+#define VERSION_SIZE 16
+
+typedef struct {
+ struct tasklet_struct dpc_h;
+ struct pci_dev *pdev;
+ struct Scsi_Host *host;
+ spinlock_t lock;
+ uint8_t quiescent;
+ int outstanding_cmds;
+ scb_t *kscb_list;
+ struct list_head kscb_pool;
+ spinlock_t kscb_pool_lock;
+ struct list_head pend_list;
+ spinlock_t pend_list_lock;
+ struct list_head completed_list;
+ spinlock_t completed_list_lock;
+ uint16_t sglen;
+ int device_ids[LSI_MAX_CHANNELS]
+ [LSI_MAX_LOGICAL_DRIVES_64LD];
+ caddr_t raid_device;
+ uint8_t max_channel;
+ uint16_t max_target;
+ uint8_t max_lun;
+
+ uint32_t unique_id;
+ int irq;
+ uint8_t ito;
+ caddr_t ibuf;
+ dma_addr_t ibuf_dma_h;
+ scb_t *uscb_list;
+ struct list_head uscb_pool;
+ spinlock_t uscb_pool_lock;
+ int max_cmds;
+ uint8_t fw_version[VERSION_SIZE];
+ uint8_t bios_version[VERSION_SIZE];
+ uint8_t max_cdb_sz;
+ uint8_t ha;
+ uint16_t init_id;
+ uint16_t max_sectors;
+ uint16_t cmd_per_lun;
+ atomic_t being_detached;
+} adapter_t;
+
+#define SCSI_FREE_LIST_LOCK(adapter) (&adapter->kscb_pool_lock)
+#define USER_FREE_LIST_LOCK(adapter) (&adapter->uscb_pool_lock)
+#define PENDING_LIST_LOCK(adapter) (&adapter->pend_list_lock)
+#define COMPLETED_LIST_LOCK(adapter) (&adapter->completed_list_lock)
+
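Putting the pool locks and the scb_t states together, an LLD might pop a free
SCB like this (an illustrative helper, not code from this header):

	static scb_t *get_free_scb(adapter_t *adapter)
	{
		unsigned long flags;
		scb_t *scb = NULL;

		spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
		if (!list_empty(&adapter->kscb_pool)) {
			scb = list_entry(adapter->kscb_pool.next, scb_t, list);
			list_del_init(&scb->list);
			scb->state = SCB_ACTIVE;	/* off the free list */
		}
		spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);

		return scb;
	}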
+
+// conversion from scsi command
+#define SCP2HOST(scp) (scp)->device->host // to host
+#define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state
+#define SCP2CHANNEL(scp) (scp)->device->channel // to channel
+#define SCP2TARGET(scp) (scp)->device->id // to target
+#define SCP2LUN(scp) (u32)(scp)->device->lun // to LUN
+
+// generic macro to convert scsi command and host to controller's soft state
+#define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0])
+#define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp))
+
+
+#define MRAID_IS_LOGICAL(adp, scp) \
+ ((SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0)
+
+#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \
+ ((sdev->channel == (adp)->max_channel) ? 1 : 0)
+
+/**
+ * MRAID_GET_DEVICE_MAP - device ids
+ * @adp : adapter's soft state
+ * @scp : mid-layer scsi command pointer
+ * @p_chan : physical channel on the controller
+ * @target : target id of the device or logical drive number
+ * @islogical : set if the command is for the logical drive
+ *
+ * Macro to retrieve information about device class, logical or physical and
+ * the corresponding physical channel and target or logical drive number
+ */
+#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \
+ /* \
+ * Is the request coming for the virtual channel \
+ */ \
+ islogical = MRAID_IS_LOGICAL(adp, scp); \
+ \
+ /* \
+ * Get an index into our table of drive ids mapping \
+ */ \
+ if (islogical) { \
+ p_chan = 0xFF; \
+ target = \
+ (adp)->device_ids[(adp)->max_channel][SCP2TARGET(scp)]; \
+ } \
+ else { \
+ p_chan = ((adp)->device_ids[SCP2CHANNEL(scp)] \
+ [SCP2TARGET(scp)] >> 8) & 0xFF; \
+ target = ((adp)->device_ids[SCP2CHANNEL(scp)] \
+ [SCP2TARGET(scp)] & 0xFF); \
+ }
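An illustrative call site for the macro above, say from an LLD's queue entry
point (the surrounding function is assumed):

	uint8_t channel, target, islogical;

	MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);

	if (islogical) {
		/* channel is 0xFF; target holds the logical drive number */
	} else {
		/* channel/target address the physical device */
	}

Note that the macro expands to multiple statements, so it cannot be used as
the body of an unbraced if or else.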
+
+/*
+ * ### Helper routines ###
+ */
+#define LSI_DBGLVL mraid_debug_level // each LLD must define a global
+ // mraid_debug_level
+
+#ifdef DEBUG
+#if defined (_ASSERT_PANIC)
+#define ASSERT_ACTION panic
+#else
+#define ASSERT_ACTION printk
+#endif
+
+#define ASSERT(expression) \
+ if (!(expression)) { \
+ ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
+ #expression, __FILE__, __LINE__, __func__); \
+ }
+#else
+#define ASSERT(expression)
+#endif
+
+/**
+ * struct mraid_pci_blk - structure holds DMA memory block info
+ * @vaddr : virtual address to a memory block
+ * @dma_addr : DMA handle to a memory block
+ *
+ * This structure is filled in for the caller. It is the responsibility of the
+ * caller to allocate this array big enough to store addresses for all
+ * requested elements
+ */
+struct mraid_pci_blk {
+ caddr_t vaddr;
+ dma_addr_t dma_addr;
+};
+
+#endif // _MEGA_COMMON_H_
+
+// vim: set ts=8 sw=8 tw=78:
diff --git a/kernel/drivers/scsi/megaraid/megaraid_ioctl.h b/kernel/drivers/scsi/megaraid/megaraid_ioctl.h
new file mode 100644
index 000000000..05f6e4ec3
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -0,0 +1,300 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_ioctl.h
+ *
+ * Definitions to interface with user level applications
+ */
+
+#ifndef _MEGARAID_IOCTL_H_
+#define _MEGARAID_IOCTL_H_
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "mbox_defs.h"
+
+/*
+ * console messages debug levels
+ */
+#define CL_ANN 0 /* print unconditionally, announcements */
+#define CL_DLEVEL1 1 /* debug level 1, informative */
+#define CL_DLEVEL2 2 /* debug level 2, verbose */
+#define CL_DLEVEL3 3 /* debug level 3, very verbose */
+
+/**
+ * con_log() - console log routine
+ * @level : indicates the severity of the message.
+ * @fmt : format string
+ *
+ * con_log displays the error messages on the console based on the current
+ * debug level. Also it attaches the appropriate kernel severity level with
+ * the message.
+ */
+#define con_log(level, fmt) do { if (LSI_DBGLVL >= level) printk fmt; } while (0)
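Callers pass the whole printk argument pack through the single fmt parameter,
hence the doubled parentheses, e.g. (nadap is illustrative):

	con_log(CL_DLEVEL1, (KERN_INFO "megaraid: %d adapters found\n", nadap));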
+
+/*
+ * Definitions & Declarations needed to use common management module
+ */
+
+#define MEGAIOC_MAGIC 'm'
+#define MEGAIOCCMD _IOWR(MEGAIOC_MAGIC, 0, mimd_t)
+
+#define MEGAIOC_QNADAP 'm' /* Query # of adapters */
+#define MEGAIOC_QDRVRVER 'e' /* Query driver version */
+#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */
+
+#define USCSICMD 0x80
+#define UIOC_RD 0x00001
+#define UIOC_WR 0x00002
+
+#define MBOX_CMD 0x00000
+#define GET_DRIVER_VER 0x10000
+#define GET_N_ADAP 0x20000
+#define GET_ADAP_INFO 0x30000
+#define GET_CAP 0x40000
+#define GET_STATS 0x50000
+#define GET_IOCTL_VERSION 0x01
+
+#define EXT_IOCTL_SIGN_SZ 16
+#define EXT_IOCTL_SIGN "$$_EXTD_IOCTL_$$"
+
+#define MBOX_LEGACY 0x00 /* ioctl has legacy mbox*/
+#define MBOX_HPE 0x01 /* ioctl has hpe mbox */
+
+#define APPTYPE_MIMD 0x00 /* old existing apps */
+#define APPTYPE_UIOC 0x01 /* new apps using uioc */
+
+#define IOCTL_ISSUE 0x00000001 /* Issue ioctl */
+#define IOCTL_ABORT 0x00000002 /* Abort previous ioctl */
+
+#define DRVRTYPE_MBOX 0x00000001 /* regular mbox driver */
+#define DRVRTYPE_HPE 0x00000002 /* new hpe driver */
+
+#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) )
+#define GETADAP(mkadap) ((mkadap) ^ MEGAIOC_MAGIC << 8)
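A worked example of the packing above, for adapter number 2 (MEGAIOC_MAGIC is
'm', i.e. 0x6D):

	int handle = MKADAP(2);		/* (0x6D << 8) | 2 == 0x6D02 */
	int adapno = GETADAP(handle);	/* 0x6D02 ^ 0x6D00 == 2 */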
+
+#define MAX_DMA_POOLS 5 /* 4k, 8k, 16k, 32k, 64k*/
+
+
+/**
+ * struct uioc_t - the common ioctl packet structure
+ *
+ * @signature : Must be "$$_EXTD_IOCTL_$$"
+ * @mb_type : Type of the mail box (MBOX_LEGACY or MBOX_HPE)
+ * @app_type : Type of the issuing application (existing or new)
+ * @opcode : Opcode of the command
+ * @adapno : Adapter number
+ * @cmdbuf : Pointer to buffer - can point to mbox or plain data buffer
+ * @xferlen : xferlen for DCMD and non mailbox commands
+ * @data_dir : Direction of the data transfer
+ * @status : Status from the driver
+ * @reserved : reserved bytes for future expansion
+ *
+ * @user_data : user data transfer address is saved in this
+ * @user_data_len: length of the data buffer sent by user app
+ * @user_pthru : user passthru address is saved in this (null if DCMD)
+ * @pthru32 : kernel address passthru (allocated per kioc)
+ * @pthru32_h : physical address of @pthru32
+ * @list : for kioc free pool list maintenance
+ * @done : call back routine for llds to call when kioc is completed
+ * @buf_vaddr : dma pool buffer attached to kioc for data transfer
+ * @buf_paddr : physical address of the dma pool buffer
+ * @pool_index : index of the dma pool that @buf_vaddr is taken from
+ * @free_buf : indicates if buffer needs to be freed after kioc completes
+ *
+ * Note : All LSI drivers understand only this packet. Any other
+ * : format sent by applications would be converted to this.
+ */
+typedef struct uioc {
+
+/* User Apps: */
+
+ uint8_t signature[EXT_IOCTL_SIGN_SZ];
+ uint16_t mb_type;
+ uint16_t app_type;
+ uint32_t opcode;
+ uint32_t adapno;
+ uint64_t cmdbuf;
+ uint32_t xferlen;
+ uint32_t data_dir;
+ int32_t status;
+ uint8_t reserved[128];
+
+/* Driver Data: */
+ void __user * user_data;
+ uint32_t user_data_len;
+
+ /* 64bit alignment */
+ uint32_t pad_for_64bit_align;
+
+ mraid_passthru_t __user *user_pthru;
+
+ mraid_passthru_t *pthru32;
+ dma_addr_t pthru32_h;
+
+ struct list_head list;
+ void (*done)(struct uioc*);
+
+ caddr_t buf_vaddr;
+ dma_addr_t buf_paddr;
+ int8_t pool_index;
+ uint8_t free_buf;
+
+ uint8_t timedout;
+
+} __attribute__ ((aligned(1024),packed)) uioc_t;
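The signature is what lets a driver tell this packet apart from legacy mimd_t
traffic; a minimal dispatch sketch (arg and the surrounding ioctl handler are
assumptions for illustration):

	uioc_t *kioc;

	kioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
	if (!kioc)
		return -ENOMEM;

	if (copy_from_user(kioc, (void __user *)arg, sizeof(uioc_t))) {
		kfree(kioc);
		return -EFAULT;
	}

	if (!memcmp(kioc->signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ)) {
		/* new-style packet: handle as uioc_t (APPTYPE_UIOC) */
	} else {
		/* legacy application: reinterpret the buffer as mimd_t */
	}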
+
+
+/**
+ * struct mraid_hba_info - information about the controller
+ *
+ * @pci_vendor_id : PCI vendor id
+ * @pci_device_id : PCI device id
+ * @subsys_vendor_id : PCI subsystem vendor id
+ * @subsys_device_id : PCI subsystem device id
+ * @baseport : base port of hba memory
+ * @pci_bus : PCI bus
+ * @pci_dev_fn : PCI device/function values
+ * @irq : interrupt vector for the device
+ *
+ * Extended information of 256 bytes about the controller. Align on the single
+ * byte boundary so that 32-bit applications can be run on 64-bit platform
+ * drivers without re-compilation.
+ * NOTE: reduce the number of reserved bytes whenever new fields are added, so
+ * that total size of the structure remains 256 bytes.
+ */
+typedef struct mraid_hba_info {
+
+ uint16_t pci_vendor_id;
+ uint16_t pci_device_id;
+ uint16_t subsys_vendor_id;
+ uint16_t subsys_device_id;
+
+ uint64_t baseport;
+ uint8_t pci_bus;
+ uint8_t pci_dev_fn;
+ uint8_t pci_slot;
+ uint8_t irq;
+
+ uint32_t unique_id;
+ uint32_t host_no;
+
+ uint8_t num_ldrv;
+} __attribute__ ((aligned(256), packed)) mraid_hba_info_t;
+
+
+/**
+ * mcontroller : adapter info structure for old mimd_t apps
+ *
+ * @base : base address
+ * @irq : irq number
+ * @numldrv : number of logical drives
+ * @pcibus : pci bus
+ * @pcidev : pci device
+ * @pcifun : pci function
+ * @pciid : pci id
+ * @pcivendor : vendor id
+ * @pcislot : slot number
+ * @uid : unique id
+ */
+typedef struct mcontroller {
+
+ uint64_t base;
+ uint8_t irq;
+ uint8_t numldrv;
+ uint8_t pcibus;
+ uint16_t pcidev;
+ uint8_t pcifun;
+ uint16_t pciid;
+ uint16_t pcivendor;
+ uint8_t pcislot;
+ uint32_t uid;
+
+} __attribute__ ((packed)) mcontroller_t;
+
+
+/**
+ * mm_dmapool_t : Represents one dma pool with just one buffer
+ *
+ * @vaddr : Virtual address
+ * @paddr : DMA physical address
+ * @buf_size : In KB - 4 = 4k, 8 = 8k etc.
+ * @handle : Handle to the dma pool
+ * @lock : lock to synchronize access to the pool
+ * @in_use : If pool already in use, attach new block
+ */
+typedef struct mm_dmapool {
+ caddr_t vaddr;
+ dma_addr_t paddr;
+ uint32_t buf_size;
+ struct dma_pool *handle;
+ spinlock_t lock;
+ uint8_t in_use;
+} mm_dmapool_t;
+
+
+/**
+ * mraid_mmadp_t: Structure that drivers pass during (un)registration
+ *
+ * @unique_id : Any unique id (usually PCI bus+dev+fn)
+ * @drvr_type : megaraid or hpe (DRVRTYPE_MBOX or DRVRTYPE_HPE)
+ * @drv_data : Driver specific; not touched by the common module
+ * @timeout : timeout for issued kiocs
+ * @max_kioc : Maximum ioctl packets acceptable by the lld
+ * @pdev : pci dev; used for allocating dma'ble memory
+ * @issue_uioc : Driver supplied routine to issue uioc_t commands
+ * : issue_uioc(drvr_data, kioc, ISSUE/ABORT, uioc_done)
+ * @quiescent : flag to indicate if ioctl can be issued to this adp
+ * @list : attach with the global list of adapters
+ * @kioc_list : block of mem for @max_kioc number of kiocs
+ * @kioc_pool : pool of free kiocs
+ * @kioc_pool_lock : protection for free pool
+ * @kioc_semaphore : so as not to exceed @max_kioc parallel ioctls
+ * @mbox_list : block of mem for @max_kioc number of mboxes
+ * @pthru_dma_pool : DMA pool to allocate passthru packets
+ * @dma_pool_list : array of dma pools
+ */
+
+typedef struct mraid_mmadp {
+
+/* Filled by driver */
+
+ uint32_t unique_id;
+ uint32_t drvr_type;
+ unsigned long drvr_data;
+ uint16_t timeout;
+ uint8_t max_kioc;
+
+ struct pci_dev *pdev;
+
+ int(*issue_uioc)(unsigned long, uioc_t *, uint32_t);
+
+/* Maintained by common module */
+ uint32_t quiescent;
+
+ struct list_head list;
+ uioc_t *kioc_list;
+ struct list_head kioc_pool;
+ spinlock_t kioc_pool_lock;
+ struct semaphore kioc_semaphore;
+
+ mbox64_t *mbox_list;
+ struct dma_pool *pthru_dma_pool;
+ mm_dmapool_t dma_pool_list[MAX_DMA_POOLS];
+
+} mraid_mmadp_t;
+
+int mraid_mm_register_adp(mraid_mmadp_t *);
+int mraid_mm_unregister_adp(uint32_t);
+uint32_t mraid_mm_adapter_app_handle(uint32_t);
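Tying the registration interface together, a mailbox LLD might fill
mraid_mmadp_t and register with the common module like this (a sketch with
illustrative values and callback; within this patch the real counterpart is
megaraid_cmm_register() in megaraid_mbox.c):

	static int my_issue_uioc(unsigned long drvr_data, uioc_t *kioc,
			uint32_t action);

	static int my_cmm_register(adapter_t *adapter)
	{
		mraid_mmadp_t adp;

		adp.unique_id	= adapter->unique_id;
		adp.drvr_type	= DRVRTYPE_MBOX;
		adp.drvr_data	= (unsigned long)adapter;
		adp.pdev	= adapter->pdev;
		adp.issue_uioc	= my_issue_uioc;
		adp.timeout	= 300;	/* seconds; illustrative */
		adp.max_kioc	= 32;	/* illustrative */

		return mraid_mm_register_adp(&adp);
	}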
+
+#endif /* _MEGARAID_IOCTL_H_ */
diff --git a/kernel/drivers/scsi/megaraid/megaraid_mbox.c b/kernel/drivers/scsi/megaraid/megaraid_mbox.c
new file mode 100644
index 000000000..f0987f22e
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_mbox.c
@@ -0,0 +1,4145 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mbox.c
+ * Version : v2.20.5.1 (Nov 16 2006)
+ *
+ * Authors:
+ * Atul Mukker <Atul.Mukker@lsi.com>
+ * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsi.com>
+ * Manoj Jose <Manoj.Jose@lsi.com>
+ * Seokmann Ju
+ *
+ * List of supported controllers
+ *
+ * OEM Product Name VID DID SSVID SSID
+ * --- ------------ --- --- ---- ----
+ * Dell PERC3/QC 101E 1960 1028 0471
+ * Dell PERC3/DC 101E 1960 1028 0493
+ * Dell PERC3/SC 101E 1960 1028 0475
+ * Dell PERC3/Di 1028 1960 1028 0123
+ * Dell PERC4/SC 1000 1960 1028 0520
+ * Dell PERC4/DC 1000 1960 1028 0518
+ * Dell PERC4/QC 1000 0407 1028 0531
+ * Dell PERC4/Di 1028 000F 1028 014A
+ * Dell PERC 4e/Si 1028 0013 1028 016c
+ * Dell PERC 4e/Di 1028 0013 1028 016d
+ * Dell PERC 4e/Di 1028 0013 1028 016e
+ * Dell PERC 4e/Di 1028 0013 1028 016f
+ * Dell PERC 4e/Di 1028 0013 1028 0170
+ * Dell PERC 4e/DC 1000 0408 1028 0002
+ * Dell PERC 4e/SC 1000 0408 1028 0001
+ *
+ *
+ * LSI MegaRAID SCSI 320-0 1000 1960 1000 A520
+ * LSI MegaRAID SCSI 320-1 1000 1960 1000 0520
+ * LSI MegaRAID SCSI 320-2 1000 1960 1000 0518
+ * LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530
+ * LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532
+ * LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531
+ * LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001
+ * LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002
+ * LSI MegaRAID SATA 150-4 1000 1960 1000 4523
+ * LSI MegaRAID SATA 150-6 1000 1960 1000 0523
+ * LSI MegaRAID SATA 300-4X 1000 0409 1000 3004
+ * LSI MegaRAID SATA 300-8X 1000 0409 1000 3008
+ *
+ * INTEL RAID Controller SRCU42X 1000 0407 8086 0532
+ * INTEL RAID Controller SRCS16 1000 1960 8086 0523
+ * INTEL RAID Controller SRCU42E 1000 0408 8086 0002
+ * INTEL RAID Controller SRCZCRX 1000 0407 8086 0530
+ * INTEL RAID Controller SRCS28X 1000 0409 8086 3008
+ * INTEL RAID Controller SROMBU42E 1000 0408 8086 3431
+ * INTEL RAID Controller SROMBU42E 1000 0408 8086 3499
+ * INTEL RAID Controller SRCU51L 1000 1960 8086 0520
+ *
+ * FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065
+ *
+ * ACER MegaRAID ROMB-2E 1000 0408 1025 004D
+ *
+ * NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287
+ *
+ * For history of changes, see Documentation/scsi/ChangeLog.megaraid
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "megaraid_mbox.h"
+
+static int megaraid_init(void);
+static void megaraid_exit(void);
+
+static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
+static void megaraid_detach_one(struct pci_dev *);
+static void megaraid_mbox_shutdown(struct pci_dev *);
+
+static int megaraid_io_attach(adapter_t *);
+static void megaraid_io_detach(adapter_t *);
+
+static int megaraid_init_mbox(adapter_t *);
+static void megaraid_fini_mbox(adapter_t *);
+
+static int megaraid_alloc_cmd_packets(adapter_t *);
+static void megaraid_free_cmd_packets(adapter_t *);
+
+static int megaraid_mbox_setup_dma_pools(adapter_t *);
+static void megaraid_mbox_teardown_dma_pools(adapter_t *);
+
+static int megaraid_sysfs_alloc_resources(adapter_t *);
+static void megaraid_sysfs_free_resources(adapter_t *);
+
+static int megaraid_abort_handler(struct scsi_cmnd *);
+static int megaraid_reset_handler(struct scsi_cmnd *);
+
+static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
+static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
+static int megaraid_busywait_mbox(mraid_device_t *);
+static int megaraid_mbox_product_info(adapter_t *);
+static int megaraid_mbox_extended_cdb(adapter_t *);
+static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
+static int megaraid_mbox_support_random_del(adapter_t *);
+static int megaraid_mbox_get_max_sg(adapter_t *);
+static void megaraid_mbox_enum_raid_scsi(adapter_t *);
+static void megaraid_mbox_flush_cache(adapter_t *);
+static int megaraid_mbox_fire_sync_cmd(adapter_t *);
+
+static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
+static void megaraid_mbox_setup_device_map(adapter_t *);
+
+static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
+static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
+static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
+static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
+ struct scsi_cmnd *);
+static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
+ struct scsi_cmnd *);
+
+static irqreturn_t megaraid_isr(int, void *);
+
+static void megaraid_mbox_dpc(unsigned long);
+
+static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
+static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
+
+static int megaraid_cmm_register(adapter_t *);
+static int megaraid_cmm_unregister(adapter_t *);
+static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
+static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
+static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
+static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
+static int wait_till_fw_empty(adapter_t *);
+
+
+
+MODULE_AUTHOR("megaraidlinux@lsi.com");
+MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGARAID_VERSION);
+
+/*
+ * ### module parameters for the driver ###
+ */
+
+/*
+ * Set to enable driver to expose unconfigured disk to kernel
+ */
+static int megaraid_expose_unconf_disks = 0;
+module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
+MODULE_PARM_DESC(unconf_disks,
+ "Set to expose unconfigured disks to kernel (default=0)");
+
+/*
+ * driver wait time if the adapter's mailbox is busy
+ */
+static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
+module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
+MODULE_PARM_DESC(busy_wait,
+ "Max wait for mailbox in microseconds if busy (default=10)");
+
+/*
+ * number of sectors per IO command
+ */
+static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
+module_param_named(max_sectors, megaraid_max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+ "Maximum number of sectors per IO command (default=128)");
+
+/*
+ * number of commands per logical unit
+ */
+static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
+module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+ "Maximum number of commands per logical unit (default=64)");
+
+
+/*
+ * Fast driver load option, skip scanning for physical devices during load.
+ * This would result in non-disk devices being skipped during driver load
+ * time. These can be later added though, using /proc/scsi/scsi
+ */
+static unsigned int megaraid_fast_load = 0;
+module_param_named(fast_load, megaraid_fast_load, int, 0);
+MODULE_PARM_DESC(fast_load,
+ "Faster loading of the driver, skips physical devices! (default=0)");
+
+
+/*
+ * mraid_debug level - threshold for amount of information to be displayed by
+ * the driver. This level can be changed through modules parameters, ioctl or
+ * sysfs/proc interface. By default, print the announcement messages only.
+ */
+int mraid_debug_level = CL_ANN;
+module_param_named(debug_level, mraid_debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
+
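All of the above are standard module parameters, so they can be set at load
time, e.g. (illustrative values, using the parameter names registered above):

	modprobe megaraid_mbox fast_load=1 debug_level=2 cmd_per_lun=32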
+/*
+ * ### global data ###
+ */
+static uint8_t megaraid_mbox_version[8] =
+ { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
+
+
+/*
+ * PCI table for all supported controllers.
+ */
+static struct pci_device_id pci_id_table_g[] = {
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_DI_DISCOVERY,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_PERC4_SC,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_SC,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_PERC4_DC,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_DC,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_VERDE,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4_DI_EVERGLADES,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_SI_BIGBEND,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_KOBUK,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_KOBUK,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_CORVETTE,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION,
+ },
+ {
+ PCI_VENDOR_ID_DELL,
+ PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
+ PCI_VENDOR_ID_DELL,
+ PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_DOBSON,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_AMI,
+ PCI_DEVICE_ID_AMI_MEGARAID3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_AMI_MEGARAID3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {
+ PCI_VENDOR_ID_LSI_LOGIC,
+ PCI_DEVICE_ID_LINDSAY,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, pci_id_table_g);
+
+
+static struct pci_driver megaraid_pci_driver = {
+ .name = "megaraid",
+ .id_table = pci_id_table_g,
+ .probe = megaraid_probe_one,
+ .remove = megaraid_detach_one,
+ .shutdown = megaraid_mbox_shutdown,
+};
+
+
+
+// definitions for the device attributes for exporting logical drive number
+// for a scsi address (Host, Channel, Id, Lun)
+
+DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
+ NULL);
+
+// Host template initializer for megaraid mbox sysfs device attributes
+static struct device_attribute *megaraid_shost_attrs[] = {
+ &dev_attr_megaraid_mbox_app_hndl,
+ NULL,
+};
+
+
+DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+
+// Host template initializer for megaraid mbox sysfs device attributes
+static struct device_attribute *megaraid_sdev_attrs[] = {
+ &dev_attr_megaraid_mbox_ld,
+ NULL,
+};
+
+/*
+ * Scsi host template for megaraid unified driver
+ */
+static struct scsi_host_template megaraid_template_g = {
+ .module = THIS_MODULE,
+ .name = "LSI Logic MegaRAID driver",
+ .proc_name = "megaraid",
+ .queuecommand = megaraid_queue_command,
+ .eh_abort_handler = megaraid_abort_handler,
+ .eh_device_reset_handler = megaraid_reset_handler,
+ .eh_bus_reset_handler = megaraid_reset_handler,
+ .eh_host_reset_handler = megaraid_reset_handler,
+ .change_queue_depth = scsi_change_queue_depth,
+ .use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
+ .sdev_attrs = megaraid_sdev_attrs,
+ .shost_attrs = megaraid_shost_attrs,
+};
+
+
+/**
+ * megaraid_init - module load hook
+ *
+ * We register ourselves as a hotplug-enabled module and let the PCI
+ * subsystem discover our adapters.
+ */
+static int __init
+megaraid_init(void)
+{
+ int rval;
+
+ // Announce the driver version
+ con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
+ MEGARAID_EXT_VERSION));
+
+ // check validity of module parameters
+ if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: max commands per lun reset to %d\n",
+ MBOX_MAX_SCSI_CMDS));
+
+ megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
+ }
+
+
+ // register as a PCI hot-plug driver module
+ rval = pci_register_driver(&megaraid_pci_driver);
+ if (rval < 0) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: could not register hotplug support.\n"));
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_exit - driver unload entry point
+ *
+ * We simply unwind the megaraid_init routine here.
+ */
+static void __exit
+megaraid_exit(void)
+{
+ con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
+
+ // unregister as PCI hotplug driver
+ pci_unregister_driver(&megaraid_pci_driver);
+
+ return;
+}
+
+
+/**
+ * megaraid_probe_one - PCI hotplug entry point
+ * @pdev : handle to this controller's PCI configuration space
+ * @id : pci device id of the class of controllers
+ *
+ * This routine should be called whenever a new adapter is detected by the
+ * PCI hotplug subsystem.
+ */
+static int
+megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ adapter_t *adapter;
+
+
+ // detected a new controller
+ con_log(CL_ANN, (KERN_INFO
+ "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device));
+
+ con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
+
+ if (pci_enable_device(pdev)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: pci_enable_device failed\n"));
+
+ return -ENODEV;
+ }
+
+ // Enable bus-mastering on this controller
+ pci_set_master(pdev);
+
+ // Allocate the per driver initialization structure
+ adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
+
+ if (adapter == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
+
+ goto out_probe_one;
+ }
+
+
+ // set up PCI related soft state and other pre-known parameters
+ adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ adapter->irq = pdev->irq;
+ adapter->pdev = pdev;
+
+ atomic_set(&adapter->being_detached, 0);
+
+ // Setup the default DMA mask. This would be changed later on
+ // depending on hardware capabilities
+ if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+
+ goto out_free_adapter;
+ }
+
+
+ // Initialize the synchronization lock for kernel and LLD
+ spin_lock_init(&adapter->lock);
+
+ // Initialize the command queues: the list of free SCBs and the list
+ // of pending SCBs.
+ INIT_LIST_HEAD(&adapter->kscb_pool);
+ spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
+
+ INIT_LIST_HEAD(&adapter->pend_list);
+ spin_lock_init(PENDING_LIST_LOCK(adapter));
+
+ INIT_LIST_HEAD(&adapter->completed_list);
+ spin_lock_init(COMPLETED_LIST_LOCK(adapter));
+
+
+ // Start the mailbox based controller
+ if (megaraid_init_mbox(adapter) != 0) {
+ con_log(CL_ANN, (KERN_WARNING
+			"megaraid: mailbox adapter did not initialize\n"));
+
+ goto out_free_adapter;
+ }
+
+ // Register with LSI Common Management Module
+ if (megaraid_cmm_register(adapter) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: could not register with management module\n"));
+
+ goto out_fini_mbox;
+ }
+
+ // setup adapter handle in PCI soft state
+ pci_set_drvdata(pdev, adapter);
+
+ // attach with scsi mid-layer
+ if (megaraid_io_attach(adapter) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
+
+ goto out_cmm_unreg;
+ }
+
+ return 0;
+
+out_cmm_unreg:
+ megaraid_cmm_unregister(adapter);
+out_fini_mbox:
+ megaraid_fini_mbox(adapter);
+out_free_adapter:
+ kfree(adapter);
+out_probe_one:
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+
+
+/**
+ * megaraid_detach_one - release framework resources and call LLD release routine
+ * @pdev : handle for our PCI configuration space
+ *
+ * This routine is called during driver unload. We free all the allocated
+ * resources and call the corresponding LLD so that it can also release all
+ * its resources.
+ *
+ * This routine is also called from the PCI hotplug system.
+ */
+static void
+megaraid_detach_one(struct pci_dev *pdev)
+{
+ adapter_t *adapter;
+ struct Scsi_Host *host;
+
+
+ // Start a rollback on this adapter
+ adapter = pci_get_drvdata(pdev);
+
+ if (!adapter) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device));
+
+ return;
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device));
+ }
+
+
+ host = adapter->host;
+
+ // do not allow any more requests from the management module for this
+ // adapter.
+	// FIXME: How do we account for the requests which might still be
+	// pending with us?
+ atomic_set(&adapter->being_detached, 1);
+
+ // detach from the IO sub-system
+ megaraid_io_detach(adapter);
+
+ // Unregister from common management module
+ //
+	// FIXME: this must return success or failure depending on whether
+	// a command is still pending with the LLD.
+ megaraid_cmm_unregister(adapter);
+
+ // finalize the mailbox based controller and release all resources
+ megaraid_fini_mbox(adapter);
+
+ kfree(adapter);
+
+ scsi_host_put(host);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
+ * @pdev : generic driver model device
+ *
+ * Shutdown notification, perform flush cache.
+ */
+static void
+megaraid_mbox_shutdown(struct pci_dev *pdev)
+{
+ adapter_t *adapter = pci_get_drvdata(pdev);
+ static int counter;
+
+ if (!adapter) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: null device in shutdown\n"));
+ return;
+ }
+
+ // flush caches now
+ con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
+ counter++));
+
+ megaraid_mbox_flush_cache(adapter);
+
+ con_log(CL_ANN, ("done\n"));
+}
+
+
+/**
+ * megaraid_io_attach - attach a device with the IO subsystem
+ * @adapter : controller's soft state
+ *
+ * Attach this device with the IO subsystem.
+ */
+static int
+megaraid_io_attach(adapter_t *adapter)
+{
+ struct Scsi_Host *host;
+
+ // Initialize SCSI Host structure
+ host = scsi_host_alloc(&megaraid_template_g, 8);
+ if (!host) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: scsi_register failed\n"));
+
+ return -1;
+ }
+
+ SCSIHOST2ADAP(host) = (caddr_t)adapter;
+ adapter->host = host;
+
+ host->irq = adapter->irq;
+ host->unique_id = adapter->unique_id;
+ host->can_queue = adapter->max_cmds;
+ host->this_id = adapter->init_id;
+ host->sg_tablesize = adapter->sglen;
+ host->max_sectors = adapter->max_sectors;
+ host->cmd_per_lun = adapter->cmd_per_lun;
+ host->max_channel = adapter->max_channel;
+ host->max_id = adapter->max_target;
+ host->max_lun = adapter->max_lun;
+
+
+ // notify mid-layer about the new controller
+ if (scsi_add_host(host, &adapter->pdev->dev)) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: scsi_add_host failed\n"));
+
+ scsi_host_put(host);
+
+ return -1;
+ }
+
+ scsi_scan_host(host);
+
+ return 0;
+}
+
+
+/**
+ * megaraid_io_detach - detach a device from the IO subsystem
+ * @adapter : controller's soft state
+ *
+ * Detach this device from the IO subsystem.
+ */
+static void
+megaraid_io_detach(adapter_t *adapter)
+{
+ struct Scsi_Host *host;
+
+ con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
+
+ host = adapter->host;
+
+ scsi_remove_host(host);
+
+ return;
+}
+
+
+/*
+ * START: Mailbox Low Level Driver
+ *
+ * This is section specific to the single mailbox based controllers
+ */
+
+/**
+ * megaraid_init_mbox - initialize controller
+ * @adapter : our soft state
+ *
+ * - Allocate 16-byte aligned mailbox memory for firmware handshake
+ * - Allocate controller's memory resources
+ * - Find out all initialization data
+ * - Allocate memory required for all the commands
+ * - Use internal library of FW routines, build up complete soft state
+ */
+static int
+megaraid_init_mbox(adapter_t *adapter)
+{
+ struct pci_dev *pdev;
+ mraid_device_t *raid_dev;
+ int i;
+ uint32_t magic64;
+
+
+ adapter->ito = MBOX_TIMEOUT;
+ pdev = adapter->pdev;
+
+ /*
+ * Allocate and initialize the init data structure for mailbox
+ * controllers
+ */
+ raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
+ if (raid_dev == NULL) return -1;
+
+
+ /*
+ * Attach the adapter soft state to raid device soft state
+ */
+ adapter->raid_device = (caddr_t)raid_dev;
+ raid_dev->fast_load = megaraid_fast_load;
+
+
+ // our baseport
+ raid_dev->baseport = pci_resource_start(pdev, 0);
+
+ if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: mem region busy\n"));
+
+ goto out_free_raid_dev;
+ }
+
+ raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
+
+ if (!raid_dev->baseaddr) {
+
+ con_log(CL_ANN, (KERN_WARNING
+			"megaraid: could not map hba memory\n"));
+
+ goto out_release_regions;
+ }
+
+ /* initialize the mutual exclusion lock for the mailbox */
+ spin_lock_init(&raid_dev->mailbox_lock);
+
+ /* allocate memory required for commands */
+ if (megaraid_alloc_cmd_packets(adapter) != 0)
+ goto out_iounmap;
+
+ /*
+ * Issue SYNC cmd to flush the pending cmds in the adapter
+ * and initialize its internal state
+ */
+
+ if (megaraid_mbox_fire_sync_cmd(adapter))
+ con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
+
+ /*
+ * Setup the rest of the soft state using the library of
+ * FW routines
+ */
+
+ /* request IRQ and register the interrupt service routine */
+ if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
+ adapter)) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: Couldn't register IRQ %d!\n", adapter->irq));
+ goto out_alloc_cmds;
+
+ }
+
+ // Product info
+ if (megaraid_mbox_product_info(adapter) != 0)
+ goto out_free_irq;
+
+ // Do we support extended CDBs
+ adapter->max_cdb_sz = 10;
+ if (megaraid_mbox_extended_cdb(adapter) == 0) {
+ adapter->max_cdb_sz = 16;
+ }
+
+ /*
+	 * Do we support a cluster environment? If we do, what is the
+	 * initiator id?
+ * NOTE: In a non-cluster aware firmware environment, the LLD should
+ * return 7 as initiator id.
+ */
+ adapter->ha = 0;
+ adapter->init_id = -1;
+ if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
+ adapter->ha = 1;
+ }
+
+ /*
+ * Prepare the device ids array to have the mapping between the kernel
+ * device address and megaraid device address.
+ * We export the physical devices on their actual addresses. The
+ * logical drives are exported on a virtual SCSI channel
+ */
+ megaraid_mbox_setup_device_map(adapter);
+
+ // If the firmware supports random deletion, update the device id map
+ if (megaraid_mbox_support_random_del(adapter)) {
+
+		// Change the logical drive numbers in the device_ids array.
+		// One slot in device_ids is reserved for the target id;
+		// that's why "<=" below
+ for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
+ adapter->device_ids[adapter->max_channel][i] += 0x80;
+ }
+ adapter->device_ids[adapter->max_channel][adapter->init_id] =
+ 0xFF;
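+		// logical drives are presumably moved to the 0x80+ id range
+		// to keep them clear of physical target ids; the initiator's
+		// own slot is marked 0xFF (invalid)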
+
+ raid_dev->random_del_supported = 1;
+ }
+
+ /*
+ * find out the maximum number of scatter-gather elements supported by
+ * this firmware
+ */
+ adapter->sglen = megaraid_mbox_get_max_sg(adapter);
+
+ // enumerate RAID and SCSI channels so that all devices on SCSI
+ // channels can later be exported, including disk devices
+ megaraid_mbox_enum_raid_scsi(adapter);
+
+ /*
+ * Other parameters required by upper layer
+ *
+ * maximum number of sectors per IO command
+ */
+ adapter->max_sectors = megaraid_max_sectors;
+
+ /*
+ * number of queued commands per LUN.
+ */
+ adapter->cmd_per_lun = megaraid_cmd_per_lun;
+
+ /*
+ * Allocate resources required to issue FW calls, when sysfs is
+ * accessed
+ */
+ if (megaraid_sysfs_alloc_resources(adapter) != 0)
+ goto out_free_irq;
+
+	// Set the DMA mask to 64-bit. All supported controllers are capable
+	// of DMA in this range
+ pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
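+	// HBA_SIGNATURE_64_BIT in the AMI signature word advertises 64-bit
+	// DMA capability; the SATA 150-4/150-6 are excluded despite carrying
+	// the signature, and known 64-bit capable LSI/Dell parts are
+	// whitelisted explicitly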
+
+ if (((magic64 == HBA_SIGNATURE_64_BIT) &&
+ ((adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_6) &&
+ (adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
+ if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: DMA mask for 64-bit failed\n"));
+
+ if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: 32-bit DMA mask failed\n"));
+ goto out_free_sysfs_res;
+ }
+ }
+ }
+
+ // setup tasklet for DPC
+ tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
+ (unsigned long)adapter);
+
+ con_log(CL_DLEVEL1, (KERN_INFO
+ "megaraid mbox hba successfully initialized\n"));
+
+ return 0;
+
+out_free_sysfs_res:
+ megaraid_sysfs_free_resources(adapter);
+out_free_irq:
+ free_irq(adapter->irq, adapter);
+out_alloc_cmds:
+ megaraid_free_cmd_packets(adapter);
+out_iounmap:
+ iounmap(raid_dev->baseaddr);
+out_release_regions:
+ pci_release_regions(pdev);
+out_free_raid_dev:
+ kfree(raid_dev);
+
+ return -1;
+}
+
+
+/**
+ * megaraid_fini_mbox - undo controller initialization
+ * @adapter : our soft state
+ */
+static void
+megaraid_fini_mbox(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ // flush all caches
+ megaraid_mbox_flush_cache(adapter);
+
+ tasklet_kill(&adapter->dpc_h);
+
+ megaraid_sysfs_free_resources(adapter);
+
+ megaraid_free_cmd_packets(adapter);
+
+ free_irq(adapter->irq, adapter);
+
+ iounmap(raid_dev->baseaddr);
+
+ pci_release_regions(adapter->pdev);
+
+ kfree(raid_dev);
+
+ return;
+}
+
+
+/**
+ * megaraid_alloc_cmd_packets - allocate shared mailbox
+ * @adapter : soft state of the raid controller
+ *
+ * Allocate and align the shared mailbox. This mailbox is used to issue
+ * all the commands. For IO based controllers, the mailbox is also registered
+ * with the FW. Allocate memory for all commands as well.
+ * This is our big allocator.
+ */
+static int
+megaraid_alloc_cmd_packets(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ struct pci_dev *pdev;
+ unsigned long align;
+ scb_t *scb;
+ mbox_ccb_t *ccb;
+ struct mraid_pci_blk *epthru_pci_blk;
+ struct mraid_pci_blk *sg_pci_blk;
+ struct mraid_pci_blk *mbox_pci_blk;
+ int i;
+
+ pdev = adapter->pdev;
+
+ /*
+ * Setup the mailbox
+ * Allocate the common 16-byte aligned memory for the handshake
+ * mailbox.
+ */
+ raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
+ sizeof(mbox64_t),
+ &raid_dev->una_mbox64_dma);
+
+ if (!raid_dev->una_mbox64) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ return -1;
+ }
+
+ /*
+ * Align the mailbox at 16-byte boundary
+ */
+ raid_dev->mbox = &raid_dev->una_mbox64->mbox32;
+
+ raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
+ (~0UL ^ 0xFUL));
+
+ raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
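+	// (addr + 15) & ~0xFUL above rounds the mailbox address up to the
+	// next 16-byte boundary; mbox64 points 8 bytes below it so that the
+	// 64-bit scatter-gather address header immediately precedes the
+	// 32-bit mailbox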
+
+ align = ((void *)raid_dev->mbox -
+ ((void *)&raid_dev->una_mbox64->mbox32));
+
+ raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
+ align;
+
+ // Allocate memory for commands issued internally
+ adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h);
+ if (!adapter->ibuf) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ goto out_free_common_mbox;
+ }
+
+ // Allocate memory for our SCSI Command Blocks and their associated
+ // memory
+
+ /*
+	 * Allocate memory for the base list of scbs. Later allocate memory
+	 * for CCBs and the embedded components of each CCB, and point the
+	 * pointers in each scb to the allocated components.
+	 * NOTE: The code to allocate SCBs will be duplicated in all the LLDs
+	 * since the calling routine does not yet know the number of available
+	 * commands.
+ */
+ adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
+
+ if (adapter->kscb_list == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ goto out_free_ibuf;
+ }
+
+ // memory allocation for our command packets
+ if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ goto out_free_scb_list;
+ }
+
+ // Adjust the scb pointers and link in the free pool
+ epthru_pci_blk = raid_dev->epthru_pool;
+ sg_pci_blk = raid_dev->sg_pool;
+ mbox_pci_blk = raid_dev->mbox_pool;
+
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ scb = adapter->kscb_list + i;
+ ccb = raid_dev->ccb_list + i;
+
+ ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
+ ccb->raw_mbox = (uint8_t *)ccb->mbox;
+ ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
+ ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16;
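+		// each pool block carries 16 bytes of slack on top of
+		// sizeof(mbox64_t) and is 16-byte aligned, so the 32-bit
+		// mailbox at offset 16 stays aligned while the 64-bit address
+		// header sits at offset 8, immediately preceding it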
+
+ // make sure the mailbox is aligned properly
+ if (ccb->mbox_dma_h & 0x0F) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid mbox: not aligned on 16-bytes\n"));
+
+ goto out_teardown_dma_pools;
+ }
+
+ ccb->epthru = (mraid_epassthru_t *)
+ epthru_pci_blk[i].vaddr;
+ ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr;
+ ccb->pthru = (mraid_passthru_t *)ccb->epthru;
+ ccb->pthru_dma_h = ccb->epthru_dma_h;
+
+
+ ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr;
+ ccb->sgl_dma_h = sg_pci_blk[i].dma_addr;
+ ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64;
+
+ scb->ccb = (caddr_t)ccb;
+ scb->gp = 0;
+
+ scb->sno = i; // command index
+
+ scb->scp = NULL;
+ scb->state = SCB_FREE;
+ scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_type = MRAID_DMA_NONE;
+ scb->dev_channel = -1;
+ scb->dev_target = -1;
+
+ // put scb in the free pool
+ list_add_tail(&scb->list, &adapter->kscb_pool);
+ }
+
+ return 0;
+
+out_teardown_dma_pools:
+ megaraid_mbox_teardown_dma_pools(adapter);
+out_free_scb_list:
+ kfree(adapter->kscb_list);
+out_free_ibuf:
+ pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+ adapter->ibuf_dma_h);
+out_free_common_mbox:
+ pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
+
+ return -1;
+}
+
+
+/**
+ * megaraid_free_cmd_packets - free memory
+ * @adapter : soft state of the raid controller
+ *
+ * Release memory resources allocated for commands.
+ */
+static void
+megaraid_free_cmd_packets(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ megaraid_mbox_teardown_dma_pools(adapter);
+
+ kfree(adapter->kscb_list);
+
+ pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+ (void *)adapter->ibuf, adapter->ibuf_dma_h);
+
+ pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
+ return;
+}
+
+
+/**
+ * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
+ * @adapter : HBA soft state
+ *
+ * Setup the dma pools for mailbox, passthru and extended passthru structures,
+ * and scatter-gather lists.
+ */
+static int
+megaraid_mbox_setup_dma_pools(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ struct mraid_pci_blk *epthru_pci_blk;
+ struct mraid_pci_blk *sg_pci_blk;
+ struct mraid_pci_blk *mbox_pci_blk;
+ int i;
+
+
+
+ // Allocate memory for 16-bytes aligned mailboxes
+ raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
+ adapter->pdev,
+ sizeof(mbox64_t) + 16,
+ 16, 0);
+
+ if (raid_dev->mbox_pool_handle == NULL) {
+ goto fail_setup_dma_pool;
+ }
+
+ mbox_pci_blk = raid_dev->mbox_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ mbox_pci_blk[i].vaddr = pci_pool_alloc(
+ raid_dev->mbox_pool_handle,
+ GFP_KERNEL,
+ &mbox_pci_blk[i].dma_addr);
+ if (!mbox_pci_blk[i].vaddr) {
+ goto fail_setup_dma_pool;
+ }
+ }
+
+ /*
+	 * Allocate memory for each embedded passthru structure pointer.
+	 * Request a 128-byte aligned structure for each passthru command
+	 * structure.
+	 * Since passthru and extended passthru commands are exclusive, they
+	 * share a common memory pool. Passthru structures piggyback on memory
+	 * allocated to extended passthru, since passthru is the smaller of
+	 * the two.
+ */
+ raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
+ adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
+
+ if (raid_dev->epthru_pool_handle == NULL) {
+ goto fail_setup_dma_pool;
+ }
+
+ epthru_pci_blk = raid_dev->epthru_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ epthru_pci_blk[i].vaddr = pci_pool_alloc(
+ raid_dev->epthru_pool_handle,
+ GFP_KERNEL,
+ &epthru_pci_blk[i].dma_addr);
+ if (!epthru_pci_blk[i].vaddr) {
+ goto fail_setup_dma_pool;
+ }
+ }
+
+
+ // Allocate memory for each scatter-gather list. Request for 512 bytes
+ // alignment for each sg list
+ raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
+ adapter->pdev,
+ sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
+ 512, 0);
+
+ if (raid_dev->sg_pool_handle == NULL) {
+ goto fail_setup_dma_pool;
+ }
+
+ sg_pci_blk = raid_dev->sg_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ sg_pci_blk[i].vaddr = pci_pool_alloc(
+ raid_dev->sg_pool_handle,
+ GFP_KERNEL,
+ &sg_pci_blk[i].dma_addr);
+ if (!sg_pci_blk[i].vaddr) {
+ goto fail_setup_dma_pool;
+ }
+ }
+
+ return 0;
+
+fail_setup_dma_pool:
+ megaraid_mbox_teardown_dma_pools(adapter);
+ return -1;
+}
+
+
+/**
+ * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
+ * @adapter : HBA soft state
+ *
+ * Teardown the dma pool for mailbox, passthru and extended passthru
+ * structures, and scatter-gather lists.
+ */
+static void
+megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ struct mraid_pci_blk *epthru_pci_blk;
+ struct mraid_pci_blk *sg_pci_blk;
+ struct mraid_pci_blk *mbox_pci_blk;
+ int i;
+
+
+ sg_pci_blk = raid_dev->sg_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
+ pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
+ sg_pci_blk[i].dma_addr);
+ }
+ if (raid_dev->sg_pool_handle)
+ pci_pool_destroy(raid_dev->sg_pool_handle);
+
+
+ epthru_pci_blk = raid_dev->epthru_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
+ pci_pool_free(raid_dev->epthru_pool_handle,
+ epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
+ }
+ if (raid_dev->epthru_pool_handle)
+ pci_pool_destroy(raid_dev->epthru_pool_handle);
+
+
+ mbox_pci_blk = raid_dev->mbox_pool;
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
+ pci_pool_free(raid_dev->mbox_pool_handle,
+ mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
+ }
+ if (raid_dev->mbox_pool_handle)
+ pci_pool_destroy(raid_dev->mbox_pool_handle);
+
+ return;
+}
+
+
+/**
+ * megaraid_alloc_scb - detach and return a scb from the free list
+ * @adapter : controller's soft state
+ * @scp : pointer to the scsi command to be executed
+ *
+ * Return the scb from the head of the free list. %NULL if there are none
+ * available.
+ */
+static scb_t *
+megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
+{
+ struct list_head *head = &adapter->kscb_pool;
+ scb_t *scb = NULL;
+ unsigned long flags;
+
+ // detach scb from free pool
+ spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+ return NULL;
+ }
+
+ scb = list_entry(head->next, scb_t, list);
+ list_del_init(&scb->list);
+
+ spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ scb->state = SCB_ACTIVE;
+ scb->scp = scp;
+ scb->dma_type = MRAID_DMA_NONE;
+
+ return scb;
+}
+
+
+/**
+ * megaraid_dealloc_scb - return the scb to the free pool
+ * @adapter : controller's soft state
+ * @scb : scb to be freed
+ *
+ * Return the scb back to the free list of scbs. The caller must 'flush' the
+ * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
+ * NOTE NOTE: Make sure the scb is not on any list before calling this
+ * routine.
+ */
+static inline void
+megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
+{
+ unsigned long flags;
+
+ // put scb in the free pool
+ scb->state = SCB_FREE;
+ scb->scp = NULL;
+ spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ list_add(&scb->list, &adapter->kscb_pool);
+
+ spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_mksgl - make the scatter-gather list
+ * @adapter : controller's soft state
+ * @scb : scsi control block
+ *
+ * Prepare the scatter-gather list.
+ */
+static int
+megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
+{
+ struct scatterlist *sgl;
+ mbox_ccb_t *ccb;
+ struct scsi_cmnd *scp;
+ int sgcnt;
+ int i;
+
+
+ scp = scb->scp;
+ ccb = (mbox_ccb_t *)scb->ccb;
+
+ sgcnt = scsi_dma_map(scp);
+ BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
+
+ // no mapping required if no data to be transferred
+ if (!sgcnt)
+ return 0;
+
+ scb->dma_type = MRAID_DMA_WSG;
+
+ scsi_for_each_sg(scp, sgl, sgcnt, i) {
+ ccb->sgl64[i].address = sg_dma_address(sgl);
+ ccb->sgl64[i].length = sg_dma_len(sgl);
+ }
+
+ // Return count of SG nodes
+ return sgcnt;
+}
+
+
+/**
+ * mbox_post_cmd - issue a mailbox command
+ * @adapter : controller's soft state
+ * @scb : command to be issued
+ *
+ * Post the command to the controller if mailbox is available.
+ */
+static int
+mbox_post_cmd(adapter_t *adapter, scb_t *scb)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ mbox_ccb_t *ccb;
+ unsigned long flags;
+ unsigned int i = 0;
+
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ mbox = raid_dev->mbox;
+ mbox64 = raid_dev->mbox64;
+
+ /*
+ * Check for busy mailbox. If it is, return failure - the caller
+ * should retry later.
+ */
+ spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
+
+ if (unlikely(mbox->busy)) {
+ do {
+ udelay(1);
+ i++;
+ rmb();
+ } while(mbox->busy && (i < max_mbox_busy_wait));
+
+ if (mbox->busy) {
+
+ spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+ return -1;
+ }
+ }
+
+
+ // Copy this command's mailbox data into "adapter's" mailbox
+ memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
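+	// the 22-byte copy appears to cover the 8-byte 64-bit address header
+	// plus the leading fields of the 32-bit mailbox, stopping short of
+	// the busy/poll/ack fields which are set explicitly below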
+ mbox->cmdid = scb->sno;
+
+ adapter->outstanding_cmds++;
+
+ if (scb->dma_direction == PCI_DMA_TODEVICE)
+ pci_dma_sync_sg_for_device(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
+ PCI_DMA_TODEVICE);
+
+ mbox->busy = 1; // Set busy
+ mbox->poll = 0;
+ mbox->ack = 0;
+ wmb();
+
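+	// setting bit 0 of the doorbell value presumably flags a new command
+	// at mbox_dma for the firmware to pick up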
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+ spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+ return 0;
+}
+
+
+/**
+ * megaraid_queue_command_lck - generic queue entry point for all LLDs
+ * @scp : pointer to the scsi command to be executed
+ * @done : callback routine to be called after the cmd has been completed
+ *
+ * Queue entry point for mailbox based controllers.
+ */
+static int
+megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+{
+ adapter_t *adapter;
+ scb_t *scb;
+ int if_busy;
+
+ adapter = SCP2ADAPTER(scp);
+ scp->scsi_done = done;
+ scp->result = 0;
+
+ /*
+ * Allocate and build a SCB request
+	 * The if_busy flag will be set if megaraid_mbox_build_cmd() could
+	 * not allocate an scb. We will return a non-zero status in that case.
+	 * NOTE: scb can be NULL even though certain commands completed
+	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we would then
+	 * return 0 and do the callback right away.
+ */
+ if_busy = 0;
+ scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
+ if (!scb) { // command already completed
+ done(scp);
+ return 0;
+ }
+
+ megaraid_mbox_runpendq(adapter, scb);
+ return if_busy;
+}
+
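+// DEF_SCSI_QCMD generates megaraid_queue_command(), a wrapper that takes the
+// SCSI host lock before invoking megaraid_queue_command_lck() above.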
+static DEF_SCSI_QCMD(megaraid_queue_command)
+
+/**
+ * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
+ * @adapter : controller's soft state
+ * @scp : mid-layer scsi command pointer
+ * @busy : set if request could not be completed because of lack of
+ * resources
+ *
+ * Transform the scsi command issued by the mid-layer into the format
+ * understood by the megaraid firmware. We also complete certain commands
+ * without sending them to the firmware.
+ */
+static scb_t *
+megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
+{
+ mraid_device_t *rdev = ADAP2RAIDDEV(adapter);
+ int channel;
+ int target;
+ int islogical;
+ mbox_ccb_t *ccb;
+ mraid_passthru_t *pthru;
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ scb_t *scb;
+ char skip[] = "skipping";
+ char scan[] = "scanning";
+ char *ss;
+
+
+ /*
+ * Get the appropriate device map for the device this command is
+ * intended for
+ */
+ MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
+
+ /*
+ * Logical drive commands
+ */
+ if (islogical) {
+ switch (scp->cmnd[0]) {
+ case TEST_UNIT_READY:
+ /*
+			 * Do we support clustering and is the support
+			 * enabled? If not, always return success
+ */
+ if (!adapter->ha) {
+ scp->result = (DID_OK << 16);
+ return NULL;
+ }
+
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ scb->dma_direction = scp->sc_data_direction;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ ccb = (mbox_ccb_t *)scb->ccb;
+
+ /*
+ * The command id will be provided by the command
+ * issuance routine
+ */
+ ccb->raw_mbox[0] = CLUSTER_CMD;
+ ccb->raw_mbox[2] = RESERVATION_STATUS;
+ ccb->raw_mbox[3] = target;
+
+ return scb;
+
+ case MODE_SENSE:
+ {
+ struct scatterlist *sgl;
+ caddr_t vaddr;
+
+ sgl = scsi_sglist(scp);
+ if (sg_page(sgl)) {
+ vaddr = (caddr_t) sg_virt(&sgl[0]);
+
+ memset(vaddr, 0, scp->cmnd[4]);
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
+ }
+ }
+ scp->result = (DID_OK << 16);
+ return NULL;
+
+ case INQUIRY:
+ /*
+			 * Display the channel scan for logical drives.
+			 * Do not display the scan for a channel if it was
+			 * already done.
+ */
+ if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
+
+ con_log(CL_ANN, (KERN_INFO
+ "scsi[%d]: scanning scsi channel %d",
+ adapter->host->host_no,
+ SCP2CHANNEL(scp)));
+
+ con_log(CL_ANN, (
+ " [virtual] for logical drives\n"));
+
+ rdev->last_disp |= (1L << SCP2CHANNEL(scp));
+ }
+
+ if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
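+				// EVPD pages are not supported: return
+				// fixed-format (0x70) sense data with
+				// ILLEGAL REQUEST / INVALID FIELD IN CDB and
+				// a CHECK CONDITION status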
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = ILLEGAL_REQUEST;
+ scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
+ scp->result = CHECK_CONDITION << 1;
+ return NULL;
+ }
+
+ /* Fall through */
+
+ case READ_CAPACITY:
+ /*
+ * Do not allow LUN > 0 for logical drives and
+ * requests for more than 40 logical drives
+ */
+ if (SCP2LUN(scp)) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+ if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+
+ /* Allocate a SCB and initialize passthru */
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ pthru = ccb->pthru;
+ mbox = ccb->mbox;
+ mbox64 = ccb->mbox64;
+
+ pthru->timeout = 0;
+ pthru->ars = 1;
+ pthru->reqsenselen = 14;
+ pthru->islogical = 1;
+ pthru->logdrv = target;
+ pthru->cdblen = scp->cmd_len;
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ mbox->cmd = MBOXCMD_PASSTHRU64;
+ scb->dma_direction = scp->sc_data_direction;
+
+ pthru->dataxferlen = scsi_bufflen(scp);
+ pthru->dataxferaddr = ccb->sgl_dma_h;
+ pthru->numsge = megaraid_mbox_mksgl(adapter,
+ scb);
+
+ mbox->xferaddr = 0xFFFFFFFF;
+ mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h;
+ mbox64->xferaddr_hi = 0;
+
+ return scb;
+
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+
+ /*
+ * Allocate a SCB and initialize mailbox
+ */
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ mbox = ccb->mbox;
+ mbox64 = ccb->mbox64;
+ mbox->logdrv = target;
+
+ /*
+			 * A little HACK: bit 1 (mask 0x02) of the opcode is
+			 * zero for all scsi read commands and set for all
+			 * scsi write commands
+ */
+ mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64:
+ MBOXCMD_LREAD64 ;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ mbox->numsectors = (uint32_t)scp->cmnd[4];
+ mbox->lba =
+ ((uint32_t)scp->cmnd[1] << 16) |
+ ((uint32_t)scp->cmnd[2] << 8) |
+ (uint32_t)scp->cmnd[3];
+
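+					// a 6-byte CDB carries only a 21-bit
+					// LBA (5 bits of byte 1 plus bytes
+					// 2 and 3)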
+ mbox->lba &= 0x1FFFFF;
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ mbox->numsectors =
+ (uint32_t)scp->cmnd[8] |
+ ((uint32_t)scp->cmnd[7] << 8);
+ mbox->lba =
+ ((uint32_t)scp->cmnd[2] << 24) |
+ ((uint32_t)scp->cmnd[3] << 16) |
+ ((uint32_t)scp->cmnd[4] << 8) |
+ (uint32_t)scp->cmnd[5];
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ mbox->lba =
+ ((uint32_t)scp->cmnd[2] << 24) |
+ ((uint32_t)scp->cmnd[3] << 16) |
+ ((uint32_t)scp->cmnd[4] << 8) |
+ (uint32_t)scp->cmnd[5];
+
+ mbox->numsectors =
+ ((uint32_t)scp->cmnd[6] << 24) |
+ ((uint32_t)scp->cmnd[7] << 16) |
+ ((uint32_t)scp->cmnd[8] << 8) |
+ (uint32_t)scp->cmnd[9];
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: unsupported CDB length\n"));
+
+ megaraid_dealloc_scb(adapter, scb);
+
+ scp->result = (DID_ERROR << 16);
+ return NULL;
+ }
+
+ scb->dma_direction = scp->sc_data_direction;
+
+ // Calculate Scatter-Gather info
+ mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h;
+ mbox->numsge = megaraid_mbox_mksgl(adapter,
+ scb);
+ mbox->xferaddr = 0xFFFFFFFF;
+ mbox64->xferaddr_hi = 0;
+
+ return scb;
+
+ case RESERVE:
+ case RELEASE:
+ /*
+			 * Do we support clustering and is the support enabled?
+ */
+ if (!adapter->ha) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+ /*
+ * Allocate a SCB and initialize mailbox
+ */
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = 0xFF;
+ scb->dev_target = target;
+ ccb->raw_mbox[0] = CLUSTER_CMD;
+ ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
+ RESERVE_LD : RELEASE_LD;
+
+ ccb->raw_mbox[3] = target;
+ scb->dma_direction = scp->sc_data_direction;
+
+ return scb;
+
+ default:
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+ }
+ else { // Passthru device commands
+
+ // Do not allow access to target id > 15 or LUN > 7
+ if (target > 15 || SCP2LUN(scp) > 7) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+ // if fast load option was set and scan for last device is
+ // over, reset the fast_load flag so that during a possible
+ // next scan, devices can be made available
+ if (rdev->fast_load && (target == 15) &&
+ (SCP2CHANNEL(scp) == adapter->max_channel -1)) {
+
+ con_log(CL_ANN, (KERN_INFO
+ "megaraid[%d]: physical device scan re-enabled\n",
+ adapter->host->host_no));
+ rdev->fast_load = 0;
+ }
+
+ /*
+ * Display the channel scan for physical devices
+ */
+ if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
+
+ ss = rdev->fast_load ? skip : scan;
+
+ con_log(CL_ANN, (KERN_INFO
+ "scsi[%d]: %s scsi channel %d [Phy %d]",
+ adapter->host->host_no, ss, SCP2CHANNEL(scp),
+ channel));
+
+ con_log(CL_ANN, (
+ " for non-raid devices\n"));
+
+ rdev->last_disp |= (1L << SCP2CHANNEL(scp));
+ }
+
+ // disable channel sweep if fast load option given
+ if (rdev->fast_load) {
+ scp->result = (DID_BAD_TARGET << 16);
+ return NULL;
+ }
+
+ // Allocate a SCB and initialize passthru
+ if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+ scp->result = (DID_ERROR << 16);
+ *busy = 1;
+ return NULL;
+ }
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scb->dev_channel = channel;
+ scb->dev_target = target;
+ scb->dma_direction = scp->sc_data_direction;
+ mbox = ccb->mbox;
+ mbox64 = ccb->mbox64;
+
+ // Does this firmware support extended CDBs
+ if (adapter->max_cdb_sz == 16) {
+ mbox->cmd = MBOXCMD_EXTPTHRU;
+
+ megaraid_mbox_prepare_epthru(adapter, scb, scp);
+
+ mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h;
+ mbox64->xferaddr_hi = 0;
+ mbox->xferaddr = 0xFFFFFFFF;
+ }
+ else {
+ mbox->cmd = MBOXCMD_PASSTHRU64;
+
+ megaraid_mbox_prepare_pthru(adapter, scb, scp);
+
+ mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h;
+ mbox64->xferaddr_hi = 0;
+ mbox->xferaddr = 0xFFFFFFFF;
+ }
+ return scb;
+ }
+
+ // NOT REACHED
+}
+
+
+/**
+ * megaraid_mbox_runpendq - execute commands queued in the pending queue
+ * @adapter : controller's soft state
+ * @scb_q : SCB to be queued in the pending list
+ *
+ * Scan the pending list for commands which are not yet issued and try to
+ * post to the controller. The SCB can be a null pointer, which indicates
+ * no SCB to queue; just try to execute the ones in the pending list.
+ *
+ * NOTE: We do not actually traverse the pending list. The SCBs are plucked
+ * out from the head of the pending list. If it is successfully issued, the
+ * next SCB is at the head now.
+ */
+static void
+megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
+{
+ scb_t *scb;
+ unsigned long flags;
+
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+
+ if (scb_q) {
+ scb_q->state = SCB_PENDQ;
+ list_add_tail(&scb_q->list, &adapter->pend_list);
+ }
+
+	// if the adapter is not in quiescent mode, post the commands to FW
+ if (adapter->quiescent) {
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+ return;
+ }
+
+ while (!list_empty(&adapter->pend_list)) {
+
+ assert_spin_locked(PENDING_LIST_LOCK(adapter));
+
+ scb = list_entry(adapter->pend_list.next, scb_t, list);
+
+ // remove the scb from the pending list and try to
+ // issue. If we are unable to issue it, put back in
+ // the pending list and return
+
+ list_del_init(&scb->list);
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+ // if mailbox was busy, return SCB back to pending
+ // list. Make sure to add at the head, since that's
+ // where it would have been removed from
+
+ scb->state = SCB_ISSUED;
+
+ if (mbox_post_cmd(adapter, scb) != 0) {
+
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+
+ scb->state = SCB_PENDQ;
+
+ list_add(&scb->list, &adapter->pend_list);
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
+ flags);
+
+ return;
+ }
+
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+ }
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_prepare_pthru - prepare a command for physical devices
+ * @adapter : pointer to controller's soft state
+ * @scb : scsi control block
+ * @scp : scsi command from the mid-layer
+ *
+ * Prepare a command for the scsi physical devices.
+ */
+static void
+megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
+ struct scsi_cmnd *scp)
+{
+ mbox_ccb_t *ccb;
+ mraid_passthru_t *pthru;
+ uint8_t channel;
+ uint8_t target;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ pthru = ccb->pthru;
+ channel = scb->dev_channel;
+ target = scb->dev_target;
+
+ // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
+ pthru->timeout = 4;
+ pthru->ars = 1;
+ pthru->islogical = 0;
+ pthru->channel = 0;
+ pthru->target = (channel << 4) | target;
+ pthru->logdrv = SCP2LUN(scp);
+ pthru->reqsenselen = 14;
+ pthru->cdblen = scp->cmd_len;
+
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ if (scsi_bufflen(scp)) {
+ pthru->dataxferlen = scsi_bufflen(scp);
+ pthru->dataxferaddr = ccb->sgl_dma_h;
+ pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
+ }
+ else {
+ pthru->dataxferaddr = 0;
+ pthru->dataxferlen = 0;
+ pthru->numsge = 0;
+ }
+ return;
+}
+
+
+/**
+ * megaraid_mbox_prepare_epthru - prepare a command for physical devices
+ * @adapter : pointer to controller's soft state
+ * @scb : scsi control block
+ * @scp : scsi command from the mid-layer
+ *
+ * Prepare a command for the scsi physical devices. This routine prepares
+ * commands for devices which can take extended CDBs (>10 bytes).
+ */
+static void
+megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
+ struct scsi_cmnd *scp)
+{
+ mbox_ccb_t *ccb;
+ mraid_epassthru_t *epthru;
+ uint8_t channel;
+ uint8_t target;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ epthru = ccb->epthru;
+ channel = scb->dev_channel;
+ target = scb->dev_target;
+
+ // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
+ epthru->timeout = 4;
+ epthru->ars = 1;
+ epthru->islogical = 0;
+ epthru->channel = 0;
+ epthru->target = (channel << 4) | target;
+ epthru->logdrv = SCP2LUN(scp);
+ epthru->reqsenselen = 14;
+ epthru->cdblen = scp->cmd_len;
+
+ memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
+
+ if (scsi_bufflen(scp)) {
+ epthru->dataxferlen = scsi_bufflen(scp);
+ epthru->dataxferaddr = ccb->sgl_dma_h;
+ epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
+ }
+ else {
+ epthru->dataxferaddr = 0;
+ epthru->dataxferlen = 0;
+ epthru->numsge = 0;
+ }
+ return;
+}
+
+
+/**
+ * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
+ * @adapter : controller's soft state
+ *
+ * Interrupt acknowledgement sequence for memory mapped HBAs. Find the
+ * completed commands and put them on the completed list for later processing.
+ *
+ * Returns: 1 if the interrupt is valid, 0 otherwise
+ */
+static int
+megaraid_ack_sequence(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ scb_t *scb;
+ uint8_t nstatus;
+ uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
+ struct list_head clist;
+ int handled;
+ uint32_t dword;
+ unsigned long flags;
+ int i, j;
+
+
+ mbox = raid_dev->mbox;
+
+ // move the SCBs from the firmware completed array to our local list
+ INIT_LIST_HEAD(&clist);
+
+	// loop while the F/W has more commands for us to complete
+ handled = 0;
+ spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
+ do {
+ /*
+ * Check if a valid interrupt is pending. If found, force the
+ * interrupt line low.
+ */
+ dword = RDOUTDOOR(raid_dev);
+ if (dword != 0x10001234) break;
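+		// 0x10001234 appears to be the firmware's valid-interrupt
+		// token; writing it back (below) deasserts the interrupt line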
+
+ handled = 1;
+
+ WROUTDOOR(raid_dev, 0x10001234);
+
+ nstatus = 0;
+ // wait for valid numstatus to post
+ for (i = 0; i < 0xFFFFF; i++) {
+ if (mbox->numstatus != 0xFF) {
+ nstatus = mbox->numstatus;
+ break;
+ }
+ rmb();
+ }
+ mbox->numstatus = 0xFF;
+
+ adapter->outstanding_cmds -= nstatus;
+
+ for (i = 0; i < nstatus; i++) {
+
+ // wait for valid command index to post
+ for (j = 0; j < 0xFFFFF; j++) {
+ if (mbox->completed[i] != 0xFF) break;
+ rmb();
+ }
+ completed[i] = mbox->completed[i];
+ mbox->completed[i] = 0xFF;
+
+ if (completed[i] == 0xFF) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid: command posting timed out\n"));
+
+ BUG();
+ continue;
+ }
+
+ // Get SCB associated with this command id
+ if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
+ // a cmm command
+ scb = adapter->uscb_list + (completed[i] -
+ MBOX_MAX_SCSI_CMDS);
+ }
+ else {
+ // an os command
+ scb = adapter->kscb_list + completed[i];
+ }
+
+ scb->status = mbox->status;
+ list_add_tail(&scb->list, &clist);
+ }
+
+ // Acknowledge interrupt
+ WRINDOOR(raid_dev, 0x02);
+
+ } while(1);
+
+ spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+
+ // put the completed commands in the completed list. DPC would
+ // complete these commands later
+ spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+
+ list_splice(&clist, &adapter->completed_list);
+
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+ // schedule the DPC if there is some work for it
+ if (handled)
+ tasklet_schedule(&adapter->dpc_h);
+
+ return handled;
+}
+
+
+/**
+ * megaraid_isr - isr for memory based mailbox based controllers
+ * @irq : irq
+ * @devp : pointer to our soft state
+ *
+ * Interrupt service routine for memory-mapped mailbox controllers.
+ */
+static irqreturn_t
+megaraid_isr(int irq, void *devp)
+{
+ adapter_t *adapter = devp;
+ int handled;
+
+ handled = megaraid_ack_sequence(adapter);
+
+ /* Loop through any pending requests */
+ if (!adapter->quiescent) {
+ megaraid_mbox_runpendq(adapter, NULL);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+
+/**
+ * megaraid_mbox_sync_scb - sync kernel buffers
+ * @adapter : controller's soft state
+ * @scb : pointer to the resource packet
+ *
+ * DMA sync if required.
+ */
+static void
+megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
+{
+ mbox_ccb_t *ccb;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+
+ if (scb->dma_direction == PCI_DMA_FROMDEVICE)
+ pci_dma_sync_sg_for_cpu(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
+ PCI_DMA_FROMDEVICE);
+
+ scsi_dma_unmap(scb->scp);
+ return;
+}
+
+
+/**
+ * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
+ * @devp : pointer to HBA soft state
+ *
+ * Pick up the commands from the completed list and send back to the owners.
+ * This is a reentrant function and does not assume any locks are held while
+ * it is being called.
+ */
+static void
+megaraid_mbox_dpc(unsigned long devp)
+{
+ adapter_t *adapter = (adapter_t *)devp;
+ mraid_device_t *raid_dev;
+ struct list_head clist;
+ struct scatterlist *sgl;
+ scb_t *scb;
+ scb_t *tmp;
+ struct scsi_cmnd *scp;
+ mraid_passthru_t *pthru;
+ mraid_epassthru_t *epthru;
+ mbox_ccb_t *ccb;
+ int islogical;
+ int pdev_index;
+ int pdev_state;
+ mbox_t *mbox;
+ unsigned long flags;
+ uint8_t c;
+ int status;
+ uioc_t *kioc;
+
+
+ if (!adapter) return;
+
+ raid_dev = ADAP2RAIDDEV(adapter);
+
+ // move the SCBs from the completed list to our local list
+ INIT_LIST_HEAD(&clist);
+
+ spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+
+ list_splice_init(&adapter->completed_list, &clist);
+
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+ list_for_each_entry_safe(scb, tmp, &clist, list) {
+
+ status = scb->status;
+ scp = scb->scp;
+ ccb = (mbox_ccb_t *)scb->ccb;
+ pthru = ccb->pthru;
+ epthru = ccb->epthru;
+ mbox = ccb->mbox;
+
+ // Make sure f/w has completed a valid command
+ if (scb->state != SCB_ISSUED) {
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid critical err: invalid command %d:%d:%p\n",
+ scb->sno, scb->state, scp));
+ BUG();
+ continue; // Must never happen!
+ }
+
+ // check for the management command and complete it right away
+ if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
+ scb->state = SCB_FREE;
+ scb->status = status;
+
+ // remove from local clist
+ list_del_init(&scb->list);
+
+ kioc = (uioc_t *)scb->gp;
+ kioc->status = 0;
+
+ megaraid_mbox_mm_done(adapter, scb);
+
+ continue;
+ }
+
+ // Was an abort issued for this command earlier
+ if (scb->state & SCB_ABORT) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: aborted cmd [%x] completed\n",
+ scb->sno));
+ }
+
+ /*
+		 * If the inquiry came from a disk drive which is not part of
+ * any RAID array, expose it to the kernel. For this to be
+ * enabled, user must set the "megaraid_expose_unconf_disks"
+ * flag to 1 by specifying it on module parameter list.
+ * This would enable data migration off drives from other
+ * configurations.
+ */
+ islogical = MRAID_IS_LOGICAL(adapter, scp);
+ if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
+ && IS_RAID_CH(raid_dev, scb->dev_channel)) {
+
+ sgl = scsi_sglist(scp);
+ if (sg_page(sgl)) {
+ c = *(unsigned char *) sg_virt(&sgl[0]);
+ } else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
+ c = 0;
+ }
+
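+			// the low 5 bits of inquiry byte 0 encode the SCSI
+			// peripheral device type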
+ if ((c & 0x1F ) == TYPE_DISK) {
+ pdev_index = (scb->dev_channel * 16) +
+ scb->dev_target;
+ pdev_state =
+ raid_dev->pdrv_state[pdev_index] & 0x0F;
+
+ if (pdev_state == PDRV_ONLINE ||
+ pdev_state == PDRV_FAILED ||
+ pdev_state == PDRV_RBLD ||
+ pdev_state == PDRV_HOTSPARE ||
+ megaraid_expose_unconf_disks == 0) {
+
+ status = 0xF0;
+ }
+ }
+ }
+
+ // Convert MegaRAID status to Linux error code
+ switch (status) {
+
+ case 0x00:
+
+ scp->result = (DID_OK << 16);
+ break;
+
+ case 0x02:
+
+ /* set sense_buffer and result fields */
+ if (mbox->cmd == MBOXCMD_PASSTHRU ||
+ mbox->cmd == MBOXCMD_PASSTHRU64) {
+
+ memcpy(scp->sense_buffer, pthru->reqsensearea,
+ 14);
+
+ scp->result = DRIVER_SENSE << 24 |
+ DID_OK << 16 | CHECK_CONDITION << 1;
+ }
+ else {
+ if (mbox->cmd == MBOXCMD_EXTPTHRU) {
+
+ memcpy(scp->sense_buffer,
+ epthru->reqsensearea, 14);
+
+ scp->result = DRIVER_SENSE << 24 |
+ DID_OK << 16 |
+ CHECK_CONDITION << 1;
+ } else {
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = ABORTED_COMMAND;
+ scp->result = CHECK_CONDITION << 1;
+ }
+ }
+ break;
+
+ case 0x08:
+
+ scp->result = DID_BUS_BUSY << 16 | status;
+ break;
+
+ default:
+
+ /*
+ * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
+ * failed
+ */
+ if (scp->cmnd[0] == TEST_UNIT_READY) {
+ scp->result = DID_ERROR << 16 |
+ RESERVATION_CONFLICT << 1;
+ }
+ else
+ /*
+ * Error code returned is 1 if Reserve or Release
+ * failed or the input parameter is invalid
+ */
+ if (status == 1 && (scp->cmnd[0] == RESERVE ||
+ scp->cmnd[0] == RELEASE)) {
+
+ scp->result = DID_ERROR << 16 |
+ RESERVATION_CONFLICT << 1;
+ }
+ else {
+ scp->result = DID_BAD_TARGET << 16 | status;
+ }
+ }
+
+ // print a debug message for all failed commands
+ if (status) {
+ megaraid_mbox_display_scb(adapter, scb);
+ }
+
+ // Free our internal resources and call the mid-layer callback
+ // routine
+ megaraid_mbox_sync_scb(adapter, scb);
+
+ // remove from local clist
+ list_del_init(&scb->list);
+
+ // put back in free list
+ megaraid_dealloc_scb(adapter, scb);
+
+ // send the scsi packet back to kernel
+ scp->scsi_done(scp);
+ }
+
+ return;
+}
+
+
+/**
+ * megaraid_abort_handler - abort the scsi command
+ * @scp : command to be aborted
+ *
+ * Abort a previous SCSI request. Only commands on the pending list can be
+ * aborted. All the commands issued to the F/W must complete.
+ **/
+static int
+megaraid_abort_handler(struct scsi_cmnd *scp)
+{
+ adapter_t *adapter;
+ mraid_device_t *raid_dev;
+ scb_t *scb;
+ scb_t *tmp;
+ int found;
+ unsigned long flags;
+ int i;
+
+
+ adapter = SCP2ADAPTER(scp);
+ raid_dev = ADAP2RAIDDEV(adapter);
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
+ scp->cmnd[0], SCP2CHANNEL(scp),
+ SCP2TARGET(scp), SCP2LUN(scp)));
+
+ // If FW has stopped responding, simply return failure
+ if (raid_dev->hw_error) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: hw error, not aborting\n"));
+ return FAILED;
+ }
+
+	// There might be a race here, where the command was completed by the
+ // firmware and now it is on the completed list. Before we could
+ // complete the command to the kernel in dpc, the abort came.
+ // Find out if this is the case to avoid the race.
+ scb = NULL;
+ spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+ list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
+
+ if (scb->scp == scp) { // Found command
+
+ list_del_init(&scb->list); // from completed list
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: %d[%d:%d], abort from completed list\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+
+ scp->result = (DID_ABORT << 16);
+ scp->scsi_done(scp);
+
+ megaraid_dealloc_scb(adapter, scb);
+
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
+ flags);
+
+ return SUCCESS;
+ }
+ }
+ spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+ // Find out if this command is still on the pending list. If it is and
+ // was never issued, abort and return success. If the command is owned
+	// by the firmware, we must wait for the FW to complete it.
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+ list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
+
+ if (scb->scp == scp) { // Found command
+
+ list_del_init(&scb->list); // from pending list
+
+ ASSERT(!(scb->state & SCB_ISSUED));
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid abort: [%d:%d], driver owner\n",
+ scb->dev_channel, scb->dev_target));
+
+ scp->result = (DID_ABORT << 16);
+ scp->scsi_done(scp);
+
+ megaraid_dealloc_scb(adapter, scb);
+
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
+ flags);
+
+ return SUCCESS;
+ }
+ }
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+
+	// Check whether we even own this command; if we do at this point, it
+	// must be owned by the firmware. The only way to locate an FW-owned
+	// scb is to traverse the list of all SCBs, since the driver does not
+	// maintain these SCBs on any list
+ found = 0;
+ spin_lock_irq(&adapter->lock);
+ for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+ scb = adapter->kscb_list + i;
+
+ if (scb->scp == scp) {
+
+ found = 1;
+
+ if (!(scb->state & SCB_ISSUED)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid abort: %d[%d:%d], invalid state\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ BUG();
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid abort: %d[%d:%d], fw owner\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ }
+ }
+ }
+ spin_unlock_irq(&adapter->lock);
+
+ if (!found) {
+		con_log(CL_ANN, (KERN_WARNING "megaraid abort: do not own\n"));
+
+ // FIXME: Should there be a callback for this command?
+ return SUCCESS;
+ }
+
+ // We cannot actually abort a command owned by firmware, return
+ // failure and wait for reset. In host reset handler, we will find out
+ // if the HBA is still live
+ return FAILED;
+}
+
+/**
+ * megaraid_reset_handler - device reset handler for mailbox based driver
+ * @scp : reference command
+ *
+ * Reset handler for the mailbox based controller. First try to find out if
+ * the FW is still live, in which case the outstanding commands counter must
+ * go down to 0. If that happens, also issue the reservation reset command to
+ * relinquish (possible) reservations on the logical drives connected to this
+ * host.
+ **/
+static int
+megaraid_reset_handler(struct scsi_cmnd *scp)
+{
+ adapter_t *adapter;
+ scb_t *scb;
+ scb_t *tmp;
+ mraid_device_t *raid_dev;
+ unsigned long flags;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+ int recovery_window;
+ int recovering;
+ int i;
+ uioc_t *kioc;
+
+ adapter = SCP2ADAPTER(scp);
+ raid_dev = ADAP2RAIDDEV(adapter);
+
+ // return failure if adapter is not responding
+ if (raid_dev->hw_error) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: hw error, cannot reset\n"));
+ return FAILED;
+ }
+
+
+ // Under exceptional conditions, FW can take up to 3 minutes to
+	// complete command processing. Wait an additional 2 minutes for the
+ // pending commands counter to go down to 0. If it doesn't, let the
+ // controller be marked offline
+ // Also, reset all the commands currently owned by the driver
+ spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+ list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
+ list_del_init(&scb->list); // from pending list
+
+ if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+
+ scb->status = -1;
+
+ kioc = (uioc_t *)scb->gp;
+ kioc->status = -EFAULT;
+
+ megaraid_mbox_mm_done(adapter, scb);
+ } else {
+ if (scb->scp == scp) { // Found command
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: %d[%d:%d], reset from pending list\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ } else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: IO packet with %d[%d:%d] being reset\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ }
+
+ scb->scp->result = (DID_RESET << 16);
+ scb->scp->scsi_done(scb->scp);
+
+ megaraid_dealloc_scb(adapter, scb);
+ }
+ }
+ spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+ if (adapter->outstanding_cmds) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: %d outstanding commands. Max wait %d sec\n",
+ adapter->outstanding_cmds,
+ (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
+ }
+
+ recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
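+	// the window is in seconds: each pass of the loop below sleeps 1s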
+
+ recovering = adapter->outstanding_cmds;
+
+ for (i = 0; i < recovery_window; i++) {
+
+ megaraid_ack_sequence(adapter);
+
+ // print a message once every 5 seconds only
+ if (!(i % 5)) {
+ con_log(CL_ANN, (
+ "megaraid mbox: Wait for %d commands to complete:%d\n",
+ adapter->outstanding_cmds,
+ (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
+ }
+
+		// stop waiting once all outstanding commands have completed
+ if (adapter->outstanding_cmds == 0) {
+ break;
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock(&adapter->lock);
+
+	// If commands are still outstanding, bail out
+ if (adapter->outstanding_cmds) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: critical hardware error!\n"));
+
+ raid_dev->hw_error = 1;
+
+ rval = FAILED;
+ goto out;
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mbox: reset sequence completed successfully\n"));
+ }
+
+
+ // If the controller supports clustering, reset reservations
+ if (!adapter->ha) {
+ rval = SUCCESS;
+ goto out;
+ }
+
+ // clear reservations if any
+ raw_mbox[0] = CLUSTER_CMD;
+ raw_mbox[2] = RESET_RESERVATIONS;
+
+ rval = SUCCESS;
+ if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
+ con_log(CL_ANN,
+ (KERN_INFO "megaraid: reservation reset\n"));
+ }
+ else {
+ rval = FAILED;
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: reservation reset failed\n"));
+ }
+
+ out:
+ spin_unlock(&adapter->lock);
+ return rval;
+}
+
+/*
+ * START: internal commands library
+ *
+ * This section of the driver has the common routines used by the driver and
+ * also all the FW routines
+ */
+
+/**
+ * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
+ * @adapter : controller's soft state
+ * @raw_mbox : the mailbox
+ *
+ * Issue a scb in synchronous and non-interrupt mode for mailbox based
+ * controllers.
+ */
+static int
+mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ uint8_t status;
+ int i;
+
+
+ mbox64 = raid_dev->mbox64;
+ mbox = raid_dev->mbox;
+
+ /*
+ * Wait until mailbox is free
+ */
+ if (megaraid_busywait_mbox(raid_dev) != 0)
+ goto blocked_mailbox;
+
+ /*
+ * Copy mailbox data into host structure
+ */
+ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
+ mbox->cmdid = 0xFE;
+ mbox->busy = 1;
+ mbox->poll = 0;
+ mbox->ack = 0;
+ mbox->numstatus = 0xFF;
+ mbox->status = 0xFF;
+
+ wmb();
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+	// wait a maximum of 1 second for the status to post. If the status is
+	// not available within 1 second, assume the FW is initializing and
+	// wait for an extended amount of time
+ if (mbox->numstatus == 0xFF) { // status not yet available
+ udelay(25);
+
+ for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
+ rmb();
+ msleep(1);
+ }
+
+
+ if (i == 1000) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mailbox: wait for FW to boot "));
+
+ for (i = 0; (mbox->numstatus == 0xFF) &&
+ (i < MBOX_RESET_WAIT); i++) {
+ rmb();
+ con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
+ MBOX_RESET_WAIT - i));
+ msleep(1000);
+ }
+
+ if (i == MBOX_RESET_WAIT) {
+
+ con_log(CL_ANN, (
+ "\nmegaraid mailbox: status not available\n"));
+
+ return -1;
+ }
+ con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
+ }
+ }
+
+	// wait a maximum of 1 second for the poll semaphore
+ if (mbox->poll != 0x77) {
+ udelay(25);
+
+ for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
+ rmb();
+ msleep(1);
+ }
+
+ if (i == 1000) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: could not get poll semaphore\n"));
+ return -1;
+ }
+ }
+
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
+ wmb();
+
+	// wait a maximum of 1 second for the acknowledgement
+ if (RDINDOOR(raid_dev) & 0x2) {
+ udelay(25);
+
+ for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
+ rmb();
+ msleep(1);
+ }
+
+ if (i == 1000) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: could not acknowledge\n"));
+ return -1;
+ }
+ }
+ mbox->poll = 0;
+ mbox->ack = 0x77;
+
+ status = mbox->status;
+
+ // invalidate the completed command id array. After command
+ // completion, firmware would write the valid id.
+ mbox->numstatus = 0xFF;
+ mbox->status = 0xFF;
+ for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
+ mbox->completed[i] = 0xFF;
+ }
+
+ return status;
+
+blocked_mailbox:
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
+ return -1;
+}
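+
+/*
+ * Usage sketch (illustrative only, not driver logic): callers zero a raw
+ * mailbox, fill in the opcode bytes and post it synchronously, e.g. the way
+ * megaraid_mbox_flush_cache() below does:
+ *
+ *	uint8_t raw_mbox[sizeof(mbox_t)];
+ *
+ *	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+ *	raw_mbox[0] = FLUSH_ADAPTER;
+ *	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0)
+ *		con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
+ */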
+
+
+/**
+ * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
+ * @adapter : controller's soft state
+ * @raw_mbox : the mailbox
+ *
+ * Issue a scb in synchronous and non-interrupt mode for mailbox based
+ * controllers. This is a faster version of the synchronous command and
+ * therefore can be called in interrupt-context as well.
+ */
+static int
+mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ long i;
+
+
+ mbox = raid_dev->mbox;
+
+ // return immediately if the mailbox is busy
+ if (mbox->busy) return -1;
+
+ // Copy mailbox data into host structure
+ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
+ mbox->cmdid = 0xFE;
+ mbox->busy = 1;
+ mbox->poll = 0;
+ mbox->ack = 0;
+ mbox->numstatus = 0xFF;
+ mbox->status = 0xFF;
+
+ wmb();
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+ for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
+ if (mbox->numstatus != 0xFF) break;
+ rmb();
+ udelay(MBOX_SYNC_DELAY_200);
+ }
+
+ if (i == MBOX_SYNC_WAIT_CNT) {
+ // We may need to re-calibrate the counter
+ con_log(CL_ANN, (KERN_CRIT
+ "megaraid: fast sync command timed out\n"));
+ }
+
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
+ wmb();
+
+ return mbox->status;
+}
+
+
+/**
+ * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
+ * @raid_dev : RAID device (HBA) soft state
+ *
+ * Wait until the controller's mailbox is available to accept more commands.
+ * Wait for at most 1 second.
+ */
+static int
+megaraid_busywait_mbox(mraid_device_t *raid_dev)
+{
+ mbox_t *mbox = raid_dev->mbox;
+ int i = 0;
+
+ if (mbox->busy) {
+ udelay(25);
+ for (i = 0; mbox->busy && i < 1000; i++)
+ msleep(1);
+ }
+
+ if (i < 1000) return 0;
+ else return -1;
+}
+
+
+/**
+ * megaraid_mbox_product_info - gather static information about the controller
+ * @adapter : our soft state
+ *
+ * Issue commands to the controller to grab some parameters required by our
+ * caller.
+ */
+static int
+megaraid_mbox_product_info(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ mraid_pinfo_t *pinfo;
+ dma_addr_t pinfo_dma_h;
+ mraid_inquiry3_t *mraid_inq3;
+ int i;
+
+
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+ mbox = (mbox_t *)raw_mbox;
+
+ /*
+ * Issue an ENQUIRY3 command to find out certain adapter parameters,
+ * e.g., max channels, max commands etc.
+ */
+ pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h);
+
+ if (pinfo == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ return -1;
+ }
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = FC_NEW_CONFIG;
+ raw_mbox[2] = NC_SUBOP_ENQUIRY3;
+ raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
+
+ // Issue the command
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
+
+ pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ pinfo, pinfo_dma_h);
+
+ return -1;
+ }
+
+ /*
+	 * Collect information about the state of each physical drive
+	 * attached to the controller. We will expose all the disks
+	 * that are not part of a RAID
+ */
+ mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
+ for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
+ raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
+ }
+
+ /*
+ * Get product info for information like number of channels,
+ * maximum commands supported.
+ */
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+ mbox->xferaddr = (uint32_t)pinfo_dma_h;
+
+ raw_mbox[0] = FC_NEW_CONFIG;
+ raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
+
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: product info failed\n"));
+
+ pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ pinfo, pinfo_dma_h);
+
+ return -1;
+ }
+
+ /*
+	 * Set up some parameters for the host, as required by our caller
+ */
+ adapter->max_channel = pinfo->nchannels;
+
+ /*
+	 * We will export all the logical drives on a single channel.
+	 * Add 1 since inquiries do not come for the initiator ID
+ */
+ adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1;
+ adapter->max_lun = 8; // up to 8 LUNs for non-disk devices
+
+ /*
+ * These are the maximum outstanding commands for the scsi-layer
+ */
+ adapter->max_cmds = MBOX_MAX_SCSI_CMDS;
+
+ memset(adapter->fw_version, 0, VERSION_SIZE);
+ memset(adapter->bios_version, 0, VERSION_SIZE);
+
+ memcpy(adapter->fw_version, pinfo->fw_version, 4);
+ adapter->fw_version[4] = 0;
+
+ memcpy(adapter->bios_version, pinfo->bios_version, 4);
+ adapter->bios_version[4] = 0;
+
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: fw version:[%s] bios version:[%s]\n",
+ adapter->fw_version, adapter->bios_version));
+
+ pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+ pinfo_dma_h);
+
+ return 0;
+}
+
+
+
+/**
+ * megaraid_mbox_extended_cdb - check for support for extended CDBs
+ * @adapter : soft state for the controller
+ *
+ * This routine checks whether the controller in question supports extended
+ * (> 10 bytes) CDBs.
+ */
+static int
+megaraid_mbox_extended_cdb(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = MAIN_MISC_OPCODE;
+ raw_mbox[2] = SUPPORT_EXT_CDB;
+
+ /*
+ * Issue the command
+ */
+ rval = 0;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+ rval = -1;
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_mbox_support_ha - Do we support clustering
+ * @adapter : soft state for the controller
+ * @init_id : ID of the initiator
+ *
+ * Determine if the firmware supports clustering and the ID of the initiator.
+ */
+static int
+megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = GET_TARGET_ID;
+
+ // Issue the command
+ *init_id = 7;
+ rval = -1;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+
+ *init_id = *(uint8_t *)adapter->ibuf;
+
+ con_log(CL_ANN, (KERN_INFO
+ "megaraid: cluster firmware, initiator ID: %d\n",
+ *init_id));
+
+ rval = 0;
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_mbox_support_random_del - Do we support random deletion
+ * @adapter : soft state for the controller
+ *
+ * Determine if the firmware supports random deletion.
+ * Return: 1 if the operation is supported, 0 otherwise
+ */
+static int
+megaraid_mbox_support_random_del(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+
+ /*
+	 * Newer firmware on Dell CERC expects different
+	 * random deletion handling, so disable it.
+ */
+ if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
+ adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
+ adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
+ (adapter->fw_version[0] > '6' ||
+ (adapter->fw_version[0] == '6' &&
+ adapter->fw_version[2] > '6') ||
+ (adapter->fw_version[0] == '6'
+ && adapter->fw_version[2] == '6'
+ && adapter->fw_version[3] > '1'))) {
+ con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
+ return 0;
+ }
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ raw_mbox[0] = FC_DEL_LOGDRV;
+ raw_mbox[2] = OP_SUP_DEL_LOGDRV;
+
+ // Issue the command
+ rval = 0;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+
+ con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
+
+ rval = 1;
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
+ * @adapter : soft state for the controller
+ *
+ * Find out the maximum number of scatter-gather elements supported by the
+ * firmware.
+ */
+static int
+megaraid_mbox_get_max_sg(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int nsg;
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = MAIN_MISC_OPCODE;
+ raw_mbox[2] = GET_MAX_SG_SUPPORT;
+
+ // Issue the command
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+ nsg = *(uint8_t *)adapter->ibuf;
+ }
+ else {
+ nsg = MBOX_DEFAULT_SG_SIZE;
+ }
+
+ if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
+
+ return nsg;
+}
+
+
+/**
+ * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
+ * @adapter : soft state for the controller
+ *
+ * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
+ * can be exported as regular SCSI channels.
+ */
+static void
+megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = CHNL_CLASS;
+ raw_mbox[2] = GET_CHNL_CLASS;
+
+ // Issue the command. If the command fails, all channels are RAID
+ // channels
+ raid_dev->channel_class = 0xFF;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+ raid_dev->channel_class = *(uint8_t *)adapter->ibuf;
+ }
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_flush_cache - flush adapter and disks cache
+ * @adapter : soft state for the controller
+ *
+ * Flush adapter cache followed by disks cache.
+ */
+static void
+megaraid_mbox_flush_cache(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ raw_mbox[0] = FLUSH_ADAPTER;
+
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+ con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
+ }
+
+ raw_mbox[0] = FLUSH_SYSTEM;
+
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+ con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
+ }
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_fire_sync_cmd - fire the sync cmd
+ * @adapter : soft state for the controller
+ *
+ * Clears the pending cmds in FW and reinits its RAID structs.
+ */
+static int
+megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mbox64_t *mbox64;
+ int status = 0;
+ int i;
+ uint32_t dword;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+ raw_mbox[0] = 0xFF;
+
+ mbox64 = raid_dev->mbox64;
+ mbox = raid_dev->mbox;
+
+ /* Wait until mailbox is free */
+ if (megaraid_busywait_mbox(raid_dev) != 0) {
+ status = 1;
+ goto blocked_mailbox;
+ }
+
+ /* Copy mailbox data into host structure */
+ memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
+ mbox->cmdid = 0xFE;
+ mbox->busy = 1;
+ mbox->poll = 0;
+ mbox->ack = 0;
+ mbox->numstatus = 0;
+ mbox->status = 0;
+
+ wmb();
+ WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+	/* Wait a maximum of 1 minute for the status to post. If the firmware
+	 * supports the above command, mbox->cmd will be set to 0; otherwise
+	 * the firmware will reject the command with mbox->numstatus set to 1.
+	 */
+
+ i = 0;
+ status = 0;
+ while (!mbox->numstatus && mbox->cmd == 0xFF) {
+ rmb();
+ msleep(1);
+ i++;
+ if (i > 1000 * 60) {
+ status = 1;
+ break;
+ }
+ }
+ if (mbox->numstatus == 1)
+ status = 1; /*cmd not supported*/
+
+ /* Check for interrupt line */
+ dword = RDOUTDOOR(raid_dev);
+ WROUTDOOR(raid_dev, dword);
+	WRINDOOR(raid_dev, 2);
+
+ return status;
+
+blocked_mailbox:
+ con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
+ return status;
+}
+
+/**
+ * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
+ * @adapter : controller's soft state
+ * @scb : SCB to be displayed
+ *
+ * Display information about the given SCB iff the current debug level is
+ * verbose.
+ */
+static void
+megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
+{
+ mbox_ccb_t *ccb;
+ struct scsi_cmnd *scp;
+ mbox_t *mbox;
+ int level;
+ int i;
+
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ scp = scb->scp;
+ mbox = ccb->mbox;
+
+ level = CL_DLEVEL3;
+
+ con_log(level, (KERN_NOTICE
+ "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
+ mbox->cmd, scb->sno));
+
+ con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
+ mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
+ mbox->numsge));
+
+ if (!scp) return;
+
+ con_log(level, (KERN_NOTICE "scsi cmnd: "));
+
+ for (i = 0; i < scp->cmd_len; i++) {
+ con_log(level, ("%#2.02x ", scp->cmnd[i]));
+ }
+
+ con_log(level, ("\n"));
+
+ return;
+}
+
+
+/**
+ * megaraid_mbox_setup_device_map - manage device ids
+ * @adapter : Driver's soft state
+ *
+ * Manage the device ids to have an appropriate mapping between the kernel
+ * scsi addresses and megaraid scsi and logical drive addresses. We export
+ * scsi devices on their actual addresses, whereas the logical drives are
+ * exported on a virtual scsi channel.
+ */
+static void
+megaraid_mbox_setup_device_map(adapter_t *adapter)
+{
+ uint8_t c;
+ uint8_t t;
+
+ /*
+ * First fill the values on the logical drive channel
+ */
+ for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
+ adapter->device_ids[adapter->max_channel][t] =
+ (t < adapter->init_id) ? t : t - 1;
+
+ adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
+
+ /*
+ * Fill the values on the physical devices channels
+ */
+ for (c = 0; c < adapter->max_channel; c++)
+ for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
+ adapter->device_ids[c][t] = (c << 8) | t;
+}
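+
+/*
+ * Worked example (assuming init_id == 7): on the virtual logical-drive
+ * channel, targets 0..6 map to LDs 0..6, target 7 (the initiator) maps to
+ * 0xFF, and targets 8..63 map to LDs 7..62. On the physical channels, each
+ * device id is simply (channel << 8) | target.
+ */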
+
+
+/*
+ * END: internal commands library
+ */
+
+/*
+ * START: Interface for the common management module
+ *
+ * This is the module, which interfaces with the common management module to
+ * provide support for ioctl and sysfs
+ */
+
+/**
+ * megaraid_cmm_register - register with the management module
+ * @adapter : HBA soft state
+ *
+ * Register with the management module, which allows applications to issue
+ * ioctl calls to the drivers. This interface is used by the management module
+ * to set up sysfs support as well.
+ */
+static int
+megaraid_cmm_register(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ mraid_mmadp_t adp;
+ scb_t *scb;
+ mbox_ccb_t *ccb;
+ int rval;
+ int i;
+
+ // Allocate memory for the base list of scb for management module.
+ adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
+
+ if (adapter->uscb_list == NULL) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+ return -1;
+ }
+
+
+ // Initialize the synchronization parameters for resources for
+ // commands for management module
+ INIT_LIST_HEAD(&adapter->uscb_pool);
+
+ spin_lock_init(USER_FREE_LIST_LOCK(adapter));
+
+
+
+	// Link all the packets. Note that for commands coming from the
+	// common management module, the mailbox physical addresses are
+	// already set up by it. We just need a placeholder for them in our
+	// local command control blocks
+ for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
+
+ scb = adapter->uscb_list + i;
+ ccb = raid_dev->uccb_list + i;
+
+ scb->ccb = (caddr_t)ccb;
+ ccb->mbox64 = raid_dev->umbox64 + i;
+ ccb->mbox = &ccb->mbox64->mbox32;
+ ccb->raw_mbox = (uint8_t *)ccb->mbox;
+
+ scb->gp = 0;
+
+ // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
+ // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
+ scb->sno = i + MBOX_MAX_SCSI_CMDS;
+
+ scb->scp = NULL;
+ scb->state = SCB_FREE;
+ scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_type = MRAID_DMA_NONE;
+ scb->dev_channel = -1;
+ scb->dev_target = -1;
+
+ // put scb in the free pool
+ list_add_tail(&scb->list, &adapter->uscb_pool);
+ }
+
+ adp.unique_id = adapter->unique_id;
+ adp.drvr_type = DRVRTYPE_MBOX;
+ adp.drvr_data = (unsigned long)adapter;
+ adp.pdev = adapter->pdev;
+ adp.issue_uioc = megaraid_mbox_mm_handler;
+ adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
+ adp.max_kioc = MBOX_MAX_USER_CMDS;
+
+ if ((rval = mraid_mm_register_adp(&adp)) != 0) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: did not register with CMM\n"));
+
+ kfree(adapter->uscb_list);
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_cmm_unregister - un-register with the management module
+ * @adapter : HBA soft state
+ *
+ * Un-register with the management module.
+ * FIXME: mgmt module must return failure for unregister if it has pending
+ * commands in LLD.
+ */
+static int
+megaraid_cmm_unregister(adapter_t *adapter)
+{
+ kfree(adapter->uscb_list);
+ mraid_mm_unregister_adp(adapter->unique_id);
+ return 0;
+}
+
+
+/**
+ * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
+ * @drvr_data : LLD specific data
+ * @kioc : CMM interface packet
+ * @action : command action
+ *
+ * This routine is invoked whenever the Common Management Module (CMM) has a
+ * command for us. The 'action' parameter specifies if this is a new command
+ * or otherwise.
+ */
+static int
+megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
+{
+ adapter_t *adapter;
+
+ if (action != IOCTL_ISSUE) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: unsupported management action:%#2x\n",
+ action));
+ return (-ENOTSUPP);
+ }
+
+ adapter = (adapter_t *)drvr_data;
+
+ // make sure this adapter is not being detached right now.
+ if (atomic_read(&adapter->being_detached)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: reject management request, detaching\n"));
+ return (-ENODEV);
+ }
+
+ switch (kioc->opcode) {
+
+ case GET_ADAP_INFO:
+
+ kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *)
+ (unsigned long)kioc->buf_vaddr);
+
+ kioc->done(kioc);
+
+ return kioc->status;
+
+ case MBOX_CMD:
+
+ return megaraid_mbox_mm_command(adapter, kioc);
+
+ default:
+ kioc->status = (-EINVAL);
+ kioc->done(kioc);
+ return (-EINVAL);
+ }
+
+ return 0; // not reached
+}
+
+/**
+ * megaraid_mbox_mm_command - issues commands routed through CMM
+ * @adapter : HBA soft state
+ * @kioc : management command packet
+ *
+ * Issues commands, which are routed through the management module.
+ */
+static int
+megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
+{
+ struct list_head *head = &adapter->uscb_pool;
+ mbox64_t *mbox64;
+ uint8_t *raw_mbox;
+ scb_t *scb;
+ mbox_ccb_t *ccb;
+ unsigned long flags;
+
+ // detach one scb from free pool
+ spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
+
+ if (list_empty(head)) { // should never happen because of CMM
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mbox: bug in cmm handler, lost resources\n"));
+
+ spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+ return (-EINVAL);
+ }
+
+ scb = list_entry(head->next, scb_t, list);
+ list_del_init(&scb->list);
+
+ spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+ scb->state = SCB_ACTIVE;
+ scb->dma_type = MRAID_DMA_NONE;
+ scb->dma_direction = PCI_DMA_NONE;
+
+ ccb = (mbox_ccb_t *)scb->ccb;
+ mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
+ raw_mbox = (uint8_t *)&mbox64->mbox32;
+
+ memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
+
+ scb->gp = (unsigned long)kioc;
+
+ /*
+ * If it is a logdrv random delete operation, we have to wait till
+ * there are no outstanding cmds at the fw and then issue it directly
+ */
+ if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
+
+ if (wait_till_fw_empty(adapter)) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mbox: LD delete, timed out\n"));
+
+ kioc->status = -ETIME;
+
+ scb->status = -1;
+
+ megaraid_mbox_mm_done(adapter, scb);
+
+ return (-ETIME);
+ }
+
+ INIT_LIST_HEAD(&scb->list);
+
+ scb->state = SCB_ISSUED;
+ if (mbox_post_cmd(adapter, scb) != 0) {
+
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid mbox: LD delete, mailbox busy\n"));
+
+ kioc->status = -EBUSY;
+
+ scb->status = -1;
+
+ megaraid_mbox_mm_done(adapter, scb);
+
+ return (-EBUSY);
+ }
+
+ return 0;
+ }
+
+ // put the command on the pending list and execute
+ megaraid_mbox_runpendq(adapter, scb);
+
+ return 0;
+}
+
+
+static int
+wait_till_fw_empty(adapter_t *adapter)
+{
+ unsigned long flags = 0;
+ int i;
+
+
+ /*
+ * Set the quiescent flag to stop issuing cmds to FW.
+ */
+ spin_lock_irqsave(&adapter->lock, flags);
+ adapter->quiescent++;
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ /*
+ * Wait till there are no more cmds outstanding at FW. Try for at most
+ * 60 seconds
+ */
+ for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
+ con_log(CL_DLEVEL1, (KERN_INFO
+ "megaraid: FW has %d pending commands\n",
+ adapter->outstanding_cmds));
+
+ msleep(1000);
+ }
+
+ return adapter->outstanding_cmds;
+}
+
+
+/**
+ * megaraid_mbox_mm_done - callback for CMM commands
+ * @adapter : HBA soft state
+ * @scb : completed command
+ *
+ * Callback routine for internal commands originating from the management
+ * module.
+ */
+static void
+megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
+{
+ uioc_t *kioc;
+ mbox64_t *mbox64;
+ uint8_t *raw_mbox;
+ unsigned long flags;
+
+ kioc = (uioc_t *)scb->gp;
+ mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
+ mbox64->mbox32.status = scb->status;
+ raw_mbox = (uint8_t *)&mbox64->mbox32;
+
+
+ // put scb in the free pool
+ scb->state = SCB_FREE;
+ scb->scp = NULL;
+
+ spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
+
+ list_add(&scb->list, &adapter->uscb_pool);
+
+ spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+ // if a delete logical drive operation succeeded, restart the
+ // controller
+ if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
+
+ adapter->quiescent--;
+
+ megaraid_mbox_runpendq(adapter, NULL);
+ }
+
+ kioc->done(kioc);
+
+ return;
+}
+
+
+/**
+ * gather_hbainfo - HBA characteristics for the applications
+ * @adapter : HBA soft state
+ * @hinfo : pointer to the caller's host info structure
+ */
+static int
+gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
+{
+ uint8_t dmajor;
+
+ dmajor = megaraid_mbox_version[0];
+
+ hinfo->pci_vendor_id = adapter->pdev->vendor;
+ hinfo->pci_device_id = adapter->pdev->device;
+ hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
+ hinfo->subsys_device_id = adapter->pdev->subsystem_device;
+
+ hinfo->pci_bus = adapter->pdev->bus->number;
+ hinfo->pci_dev_fn = adapter->pdev->devfn;
+ hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn);
+ hinfo->irq = adapter->host->irq;
+ hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport;
+
+ hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn;
+ hinfo->host_no = adapter->host->host_no;
+
+ return 0;
+}
+
+/*
+ * END: Interface for the common management module
+ */
+
+
+
+/**
+ * megaraid_sysfs_alloc_resources - allocate sysfs related resources
+ * @adapter : controller's soft state
+ *
+ * Allocate packets required to issue FW calls whenever the sysfs attributes
+ * are read. These attributes would require up-to-date information from the
+ * FW. Also set up resources for mutual exclusion to share these resources and
+ * the wait queue.
+ *
+ * Return 0 on success.
+ * Return -ERROR_CODE on failure.
+ */
+static int
+megaraid_sysfs_alloc_resources(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ int rval = 0;
+
+ raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
+
+ raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
+
+ raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
+ PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+
+ if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
+ !raid_dev->sysfs_buffer) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ rval = -ENOMEM;
+
+ megaraid_sysfs_free_resources(adapter);
+ }
+
+ mutex_init(&raid_dev->sysfs_mtx);
+
+ init_waitqueue_head(&raid_dev->sysfs_wait_q);
+
+ return rval;
+}
+
+
+/**
+ * megaraid_sysfs_free_resources - free sysfs related resources
+ * @adapter : controller's soft state
+ *
+ * Free packets allocated for sysfs FW commands
+ */
+static void
+megaraid_sysfs_free_resources(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ kfree(raid_dev->sysfs_uioc);
+ kfree(raid_dev->sysfs_mbox64);
+
+ if (raid_dev->sysfs_buffer) {
+ pci_free_consistent(adapter->pdev, PAGE_SIZE,
+ raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
+ }
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap_done - callback for get ldmap
+ * @uioc : completed packet
+ *
+ * Callback routine called in the ISR/tasklet context for get ldmap call
+ */
+static void
+megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
+{
+ adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ uioc->status = 0;
+
+ wake_up(&raid_dev->sysfs_wait_q);
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
+ * @data : timed out packet
+ *
+ * Timeout routine to recover and return to the application, in case the
+ * adapter has stopped responding. A timeout of 60 seconds for this command
+ * seems like a good value.
+ */
+static void
+megaraid_sysfs_get_ldmap_timeout(unsigned long data)
+{
+ uioc_t *uioc = (uioc_t *)data;
+ adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+ uioc->status = -ETIME;
+
+ wake_up(&raid_dev->sysfs_wait_q);
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap - get updated logical drive map
+ * @adapter : controller's soft state
+ *
+ * This routine is called whenever the user reads the logical drive
+ * attributes; it fetches the current logical drive mapping table from the
+ * firmware. We use the management APIs to issue commands to the controller.
+ *
+ * NOTE: The command issuance functionality is not generalized and is
+ * implemented only in the context of the "get ld map" command. If required,
+ * the command issuance logic can be trivially pulled out and implemented as a
+ * standalone library. For now, this should suffice since there is no other
+ * user of this interface.
+ *
+ * Return 0 on success.
+ * Return -1 on failure.
+ */
+static int
+megaraid_sysfs_get_ldmap(adapter_t *adapter)
+{
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ uioc_t *uioc;
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ char *raw_mbox;
+ struct timer_list sysfs_timer;
+ struct timer_list *timerp;
+ caddr_t ldmap;
+ int rval = 0;
+
+ /*
+ * Allow only one read at a time to go through the sysfs attributes
+ */
+ mutex_lock(&raid_dev->sysfs_mtx);
+
+ uioc = raid_dev->sysfs_uioc;
+ mbox64 = raid_dev->sysfs_mbox64;
+ ldmap = raid_dev->sysfs_buffer;
+
+ memset(uioc, 0, sizeof(uioc_t));
+ memset(mbox64, 0, sizeof(mbox64_t));
+ memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));
+
+ mbox = &mbox64->mbox32;
+ raw_mbox = (char *)mbox;
+ uioc->cmdbuf = (uint64_t)(unsigned long)mbox64;
+ uioc->buf_vaddr = (caddr_t)adapter;
+ uioc->status = -ENODATA;
+ uioc->done = megaraid_sysfs_get_ldmap_done;
+
+ /*
+ * Prepare the mailbox packet to get the current logical drive mapping
+ * table
+ */
+ mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;
+
+ raw_mbox[0] = FC_DEL_LOGDRV;
+ raw_mbox[2] = OP_GET_LDID_MAP;
+
+ /*
+ * Setup a timer to recover from a non-responding controller
+ */
+ timerp = &sysfs_timer;
+ init_timer(timerp);
+
+ timerp->function = megaraid_sysfs_get_ldmap_timeout;
+ timerp->data = (unsigned long)uioc;
+ timerp->expires = jiffies + 60 * HZ;
+
+ add_timer(timerp);
+
+ /*
+ * Send the command to the firmware
+ */
+ rval = megaraid_mbox_mm_command(adapter, uioc);
+
+ if (rval == 0) { // command successfully issued
+ wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));
+
+ /*
+ * Check if the command timed out
+ */
+ if (uioc->status == -ETIME) {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: sysfs get ld map timed out\n"));
+
+ rval = -ETIME;
+ }
+ else {
+ rval = mbox->status;
+ }
+
+ if (rval == 0) {
+ memcpy(raid_dev->curr_ldmap, ldmap,
+ sizeof(raid_dev->curr_ldmap));
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: get ld map failed with %x\n", rval));
+ }
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: could not issue ldmap command:%x\n", rval));
+ }
+
+
+ del_timer_sync(timerp);
+
+ mutex_unlock(&raid_dev->sysfs_mtx);
+
+ return rval;
+}
+
+
+/**
+ * megaraid_sysfs_show_app_hndl - display application handle for this adapter
+ * @dev : device object representation for the host
+ * @attr : device attribute to show
+ * @buf : buffer to send data to
+ *
+ * Display the handle used by the applications while executing management
+ * tasks on the adapter. We invoke a management module API to get the adapter
+ * handle, since we do not interface with applications directly.
+ */
+static ssize_t
+megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost);
+ uint32_t app_hndl;
+
+ app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
+
+ return snprintf(buf, 8, "%u\n", app_hndl);
+}
+
+
+/**
+ * megaraid_sysfs_show_ldnum - display the logical drive number for this device
+ * @dev : device object representation for the scsi device
+ * @attr : device attribute to show
+ * @buf : buffer to send data to
+ *
+ * Display the logical drive number for the device in question, if it is a
+ * valid logical drive. For physical devices, "-1" is returned.
+ *
+ * The logical drive number is displayed in following format:
+ *
+ * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
+ *
+ * <int> <int> <int> <int>
+ */
+static ssize_t
+megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
+ mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+ int scsi_id = -1;
+ int logical_drv = -1;
+ int ldid_map = -1;
+ uint32_t app_hndl = 0;
+ int mapped_sdev_id;
+ int rval;
+ int i;
+
+ if (raid_dev->random_del_supported &&
+ MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {
+
+ rval = megaraid_sysfs_get_ldmap(adapter);
+ if (rval == 0) {
+
+ for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {
+
+ mapped_sdev_id = sdev->id;
+
+ if (sdev->id > adapter->init_id) {
+ mapped_sdev_id -= 1;
+ }
+
+ if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {
+
+ scsi_id = sdev->id;
+
+ logical_drv = i;
+
+ ldid_map = raid_dev->curr_ldmap[i];
+
+ app_hndl = mraid_mm_adapter_app_handle(
+ adapter->unique_id);
+
+ break;
+ }
+ }
+ }
+ else {
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: sysfs get ld map failed: %x\n",
+ rval));
+ }
+ }
+
+ return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
+ ldid_map, app_hndl);
+}
+
+
+/*
+ * END: Mailbox Low Level Driver
+ */
+module_init(megaraid_init);
+module_exit(megaraid_exit);
+
+/* vim: set ts=8 sw=8 tw=78 ai si: */
diff --git a/kernel/drivers/scsi/megaraid/megaraid_mbox.h b/kernel/drivers/scsi/megaraid/megaraid_mbox.h
new file mode 100644
index 000000000..c1d86d961
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_mbox.h
@@ -0,0 +1,238 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mbox.h
+ */
+
+#ifndef _MEGARAID_H_
+#define _MEGARAID_H_
+
+
+#include "mega_common.h"
+#include "mbox_defs.h"
+#include "megaraid_ioctl.h"
+
+
+#define MEGARAID_VERSION "2.20.5.1"
+#define MEGARAID_EXT_VERSION "(Release Date: Thu Nov 16 15:32:35 EST 2006)"
+
+
+/*
+ * Define some PCI values here until they are put in the kernel
+ */
+#define PCI_DEVICE_ID_PERC4_DI_DISCOVERY 0x000E
+#define PCI_SUBSYS_ID_PERC4_DI_DISCOVERY 0x0123
+
+#define PCI_DEVICE_ID_PERC4_SC 0x1960
+#define PCI_SUBSYS_ID_PERC4_SC 0x0520
+
+#define PCI_DEVICE_ID_PERC4_DC 0x1960
+#define PCI_SUBSYS_ID_PERC4_DC 0x0518
+
+#define PCI_DEVICE_ID_VERDE 0x0407
+
+#define PCI_DEVICE_ID_PERC4_DI_EVERGLADES 0x000F
+#define PCI_SUBSYS_ID_PERC4_DI_EVERGLADES 0x014A
+
+#define PCI_DEVICE_ID_PERC4E_SI_BIGBEND 0x0013
+#define PCI_SUBSYS_ID_PERC4E_SI_BIGBEND 0x016c
+
+#define PCI_DEVICE_ID_PERC4E_DI_KOBUK 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_KOBUK 0x016d
+
+#define PCI_DEVICE_ID_PERC4E_DI_CORVETTE 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_CORVETTE 0x016e
+
+#define PCI_DEVICE_ID_PERC4E_DI_EXPEDITION 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION 0x016f
+
+#define PCI_DEVICE_ID_PERC4E_DI_GUADALUPE 0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE 0x0170
+
+#define PCI_DEVICE_ID_DOBSON 0x0408
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0 0xA520
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1 0x0520
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2 0x0518
+
+#define PCI_DEVICE_ID_MEGARAID_I4_133_RAID 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_I4_133_RAID 0x0522
+
+#define PCI_DEVICE_ID_MEGARAID_SATA_150_4 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SATA_150_4 0x4523
+
+#define PCI_DEVICE_ID_MEGARAID_SATA_150_6 0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SATA_150_6 0x0523
+
+#define PCI_DEVICE_ID_LINDSAY 0x0409
+
+#define PCI_DEVICE_ID_INTEL_RAID_SRCS16 0x1960
+#define PCI_SUBSYS_ID_INTEL_RAID_SRCS16 0x0523
+
+#define PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x1960
+#define PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x0520
+
+#define PCI_SUBSYS_ID_PERC3_QC 0x0471
+#define PCI_SUBSYS_ID_PERC3_DC 0x0493
+#define PCI_SUBSYS_ID_PERC3_SC 0x0475
+#define PCI_SUBSYS_ID_CERC_ATA100_4CH 0x0511
+
+
+#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel
+#define MBOX_MAX_USER_CMDS 32 // number of cmds for applications
+#define MBOX_DEF_CMD_PER_LUN 64 // default commands per lun
+#define MBOX_DEFAULT_SG_SIZE 26 // default sg size supported by all fw
+#define MBOX_MAX_SG_SIZE 32 // maximum scatter-gather list size
+#define MBOX_MAX_SECTORS 128 // maximum sectors per IO
+#define MBOX_TIMEOUT 30 // timeout value for internal cmds
+#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
+#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
+#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
+#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode
+
+#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds
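+
+// With the two values above, mbox_post_sync_cmd_fast() busy-waits for at
+// most 0xFFFF iterations of 200us each, i.e. roughly 13 seconds, before
+// reporting a timeout.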
+
+/*
+ * maximum transfer that can happen through the firmware commands issued
+ * internally by the driver.
+ */
+#define MBOX_IBUF_SIZE 4096
+
+
+/**
+ * mbox_ccb_t - command control block specific to mailbox based controllers
+ * @raw_mbox : raw mailbox pointer
+ * @mbox : mailbox
+ * @mbox64 : extended mailbox
+ * @mbox_dma_h : mailbox dma address
+ * @sgl64 : 64-bit scatter-gather list
+ * @sgl32 : 32-bit scatter-gather list
+ * @sgl_dma_h : dma handle for the scatter-gather list
+ * @pthru : passthru structure
+ * @pthru_dma_h : dma handle for the passthru structure
+ * @epthru : extended passthru structure
+ * @epthru_dma_h : dma handle for extended passthru structure
+ * @buf_dma_h : dma handle for buffers w/o sg list
+ *
+ * command control block specific to the mailbox based controllers
+ */
+typedef struct {
+ uint8_t *raw_mbox;
+ mbox_t *mbox;
+ mbox64_t *mbox64;
+ dma_addr_t mbox_dma_h;
+ mbox_sgl64 *sgl64;
+ mbox_sgl32 *sgl32;
+ dma_addr_t sgl_dma_h;
+ mraid_passthru_t *pthru;
+ dma_addr_t pthru_dma_h;
+ mraid_epassthru_t *epthru;
+ dma_addr_t epthru_dma_h;
+ dma_addr_t buf_dma_h;
+} mbox_ccb_t;
+
+
+/**
+ * mraid_device_t - adapter soft state structure for mailbox controllers
+ * @una_mbox64 : 64-bit mbox - unaligned
+ * @una_mbox64_dma : mbox dma addr - unaligned
+ * @mbox : 32-bit mbox - aligned
+ * @mbox64 : 64-bit mbox - aligned
+ * @mbox_dma : mbox dma addr - aligned
+ * @mailbox_lock : exclusion lock for the mailbox
+ * @baseport : base port of hba memory
+ * @baseaddr : mapped addr of hba memory
+ * @mbox_pool : pool of mailboxes
+ * @mbox_pool_handle : handle for the mailbox pool memory
+ * @epthru_pool : a pool for extended passthru commands
+ * @epthru_pool_handle : handle to the pool above
+ * @sg_pool : pool of scatter-gather lists for this driver
+ * @sg_pool_handle : handle to the pool above
+ * @ccb_list : list of our command control blocks
+ * @uccb_list : list of cmd control blocks for mgmt module
+ * @umbox64 : array of mailbox for user commands (cmm)
+ * @pdrv_state : array for state of each physical drive.
+ * @last_disp : flag used to show device scanning
+ * @hw_error : set if FW not responding
+ * @fast_load : If set, skip physical device scanning
+ * @channel_class : channel class, RAID or SCSI
+ * @sysfs_mtx : mutex to serialize access to sysfs res.
+ * @sysfs_uioc : management packet to issue FW calls from sysfs
+ * @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs
+ * @sysfs_buffer : data buffer for FW commands issued from sysfs
+ * @sysfs_buffer_dma : DMA buffer for FW commands issued from sysfs
+ * @sysfs_wait_q : wait queue for sysfs operations
+ * @random_del_supported : set if the random deletion is supported
+ * @curr_ldmap : current LDID map
+ *
+ * Initialization structure for mailbox controllers: memory based and IO based.
+ * All the fields in this structure are LLD specific and may be discovered at
+ * init() or start() time.
+ *
+ * NOTE: The fields of this structure are placed to minimize cache misses
+ */
+#define MAX_LD_EXTENDED64 64
+typedef struct {
+ mbox64_t *una_mbox64;
+ dma_addr_t una_mbox64_dma;
+ mbox_t *mbox;
+ mbox64_t *mbox64;
+ dma_addr_t mbox_dma;
+ spinlock_t mailbox_lock;
+ unsigned long baseport;
+ void __iomem * baseaddr;
+ struct mraid_pci_blk mbox_pool[MBOX_MAX_SCSI_CMDS];
+ struct dma_pool *mbox_pool_handle;
+ struct mraid_pci_blk epthru_pool[MBOX_MAX_SCSI_CMDS];
+ struct dma_pool *epthru_pool_handle;
+ struct mraid_pci_blk sg_pool[MBOX_MAX_SCSI_CMDS];
+ struct dma_pool *sg_pool_handle;
+ mbox_ccb_t ccb_list[MBOX_MAX_SCSI_CMDS];
+ mbox_ccb_t uccb_list[MBOX_MAX_USER_CMDS];
+ mbox64_t umbox64[MBOX_MAX_USER_CMDS];
+
+ uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
+ uint32_t last_disp;
+ int hw_error;
+ int fast_load;
+ uint8_t channel_class;
+ struct mutex sysfs_mtx;
+ uioc_t *sysfs_uioc;
+ mbox64_t *sysfs_mbox64;
+ caddr_t sysfs_buffer;
+ dma_addr_t sysfs_buffer_dma;
+ wait_queue_head_t sysfs_wait_q;
+ int random_del_supported;
+ uint16_t curr_ldmap[MAX_LD_EXTENDED64];
+} mraid_device_t;
+
+// route to raid device from adapter
+#define ADAP2RAIDDEV(adp) ((mraid_device_t *)((adp)->raid_device))
+
+#define MAILBOX_LOCK(rdev) (&(rdev)->mailbox_lock)
+
+// Find out if this channel is RAID or SCSI
+#define IS_RAID_CH(rdev, ch) (((rdev)->channel_class >> (ch)) & 0x01)
+
+
+#define RDINDOOR(rdev) readl((rdev)->baseaddr + 0x20)
+#define RDOUTDOOR(rdev) readl((rdev)->baseaddr + 0x2C)
+#define WRINDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x20)
+#define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C)
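+
+// Doorbell handshake (see mbox_post_sync_cmd() in megaraid_mbox.c): writing
+// (mbox_dma | 0x1) to the inbound doorbell posts a command, writing
+// (mbox_dma | 0x2) acknowledges its completion, and the 0x2 bit of
+// RDINDOOR() stays set until the firmware has consumed the acknowledgement.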
+
+#endif // _MEGARAID_H_
+
+// vim: set ts=8 sw=8 tw=78:
diff --git a/kernel/drivers/scsi/megaraid/megaraid_mm.c b/kernel/drivers/scsi/megaraid/megaraid_mm.c
new file mode 100644
index 000000000..a70692779
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_mm.c
@@ -0,0 +1,1263 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mm.c
+ * Version : v2.20.2.7 (Jul 16 2006)
+ *
+ * Common management module
+ */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "megaraid_mm.h"
+
+
+// Entry points for char node driver
+static DEFINE_MUTEX(mraid_mm_mutex);
+static int mraid_mm_open(struct inode *, struct file *);
+static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
+
+
+// routines to convert to and from the old format
+static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
+static int kioc_to_mimd(uioc_t *, mimd_t __user *);
+
+
+// Helper functions
+static int handle_drvrcmd(void __user *, uint8_t, int *);
+static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
+static void ioctl_done(uioc_t *);
+static void lld_timedout(unsigned long);
+static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
+static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
+static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
+static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
+static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
+static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
+static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
+static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
+
+#ifdef CONFIG_COMPAT
+static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
+MODULE_AUTHOR("LSI Logic Corporation");
+MODULE_DESCRIPTION("LSI Logic Management Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LSI_COMMON_MOD_VERSION);
+
+static int dbglevel = CL_ANN;
+module_param_named(dlevel, dbglevel, int, 0);
+MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
+
+EXPORT_SYMBOL(mraid_mm_register_adp);
+EXPORT_SYMBOL(mraid_mm_unregister_adp);
+EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
+
+static uint32_t drvr_ver = 0x02200207;
+
+static int adapters_count_g;
+static struct list_head adapters_list_g;
+
+static wait_queue_head_t wait_q;
+
+static const struct file_operations lsi_fops = {
+ .open = mraid_mm_open,
+ .unlocked_ioctl = mraid_mm_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mraid_mm_compat_ioctl,
+#endif
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice megaraid_mm_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "megadev0",
+ .fops = &lsi_fops,
+};
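+
+// Note: with MISC_DYNAMIC_MINOR the management module appears as the char
+// node /dev/megadev0 (assuming standard udev naming), which is the entry
+// point for the ioctls handled below.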
+
+/**
+ * mraid_mm_open - open routine for char node interface
+ * @inode : unused
+ * @filep : unused
+ *
+ * Allow ioctl operations by apps only if they have superuser privilege.
+ */
+static int
+mraid_mm_open(struct inode *inode, struct file *filep)
+{
+ /*
+ * Only allow superuser to access private ioctl interface
+ */
+ if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
+
+ return 0;
+}
+
+/**
+ * mraid_mm_ioctl - module entry-point for ioctls
+ * @inode : inode (ignored)
+ * @filep : file operations pointer (ignored)
+ * @cmd : ioctl command
+ * @arg : user ioctl packet
+ */
+static int
+mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ uioc_t *kioc;
+ char signature[EXT_IOCTL_SIGN_SZ] = {0};
+ int rval;
+ mraid_mmadp_t *adp;
+ uint8_t old_ioctl;
+ int drvrcmd_rval;
+ void __user *argp = (void __user *)arg;
+
+ /*
+	 * Make sure only USCSICMD commands are issued through this interface;
+	 * a MIMD application could still fire a different command.
+ */
+
+ if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
+ return (-EINVAL);
+ }
+
+ /*
+ * Look for signature to see if this is the new or old ioctl format.
+ */
+ if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: copy from usr addr failed\n"));
+ return (-EFAULT);
+ }
+
+ if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
+ old_ioctl = 0;
+ else
+ old_ioctl = 1;
+
+ /*
+ * At present, we don't support the new ioctl packet
+ */
+ if (!old_ioctl )
+ return (-EINVAL);
+
+ /*
+ * If it is a driver ioctl (as opposed to fw ioctls), then we can
+ * handle the command locally. rval > 0 means it is not a drvr cmd
+ */
+ rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
+
+ if (rval < 0)
+ return rval;
+ else if (rval == 0)
+ return drvrcmd_rval;
+
+ rval = 0;
+ if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
+ return rval;
+ }
+
+ /*
+	 * Check if the adapter can accept ioctls. We may have marked it
+	 * offline if any previous kioc timed out on this controller.
+ */
+ if (!adp->quiescent) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: controller cannot accept cmds due to "
+ "earlier errors\n" ));
+ return -EFAULT;
+ }
+
+ /*
+ * The following call will block till a kioc is available
+ */
+ kioc = mraid_mm_alloc_kioc(adp);
+
+ /*
+ * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
+ */
+ if ((rval = mimd_to_kioc(argp, adp, kioc))) {
+ mraid_mm_dealloc_kioc(adp, kioc);
+ return rval;
+ }
+
+ kioc->done = ioctl_done;
+
+ /*
+	 * Issue the IOCTL to the low level driver. After the IOCTL completes,
+	 * release the kioc if and only if it did _not_ time out. If it timed
+	 * out, the resources are still with the low level driver.
+ */
+ if ((rval = lld_ioctl(adp, kioc))) {
+
+ if (!kioc->timedout)
+ mraid_mm_dealloc_kioc(adp, kioc);
+
+ return rval;
+ }
+
+ /*
+ * Convert the kioc back to user space
+ */
+ rval = kioc_to_mimd(kioc, argp);
+
+ /*
+ * Return the kioc to free pool
+ */
+ mraid_mm_dealloc_kioc(adp, kioc);
+
+ return rval;
+}
+
+static long
+mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
+ mutex_lock(&mraid_mm_mutex);
+ err = mraid_mm_ioctl(filep, cmd, arg);
+ mutex_unlock(&mraid_mm_mutex);
+
+ return err;
+}
+
+/**
+ * mraid_mm_get_adapter - Return the adapter corresponding to the mimd packet
+ * @umimd : User space mimd_t ioctl packet
+ * @rval : returned success/error status
+ *
+ * The function return value is a pointer to the located adapter.
+ */
+static mraid_mmadp_t *
+mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
+{
+ mraid_mmadp_t *adapter;
+ mimd_t mimd;
+ uint32_t adapno;
+ int iterator;
+
+
+ if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
+ *rval = -EFAULT;
+ return NULL;
+ }
+
+ adapno = GETADAP(mimd.ui.fcs.adapno);
+
+ if (adapno >= adapters_count_g) {
+ *rval = -ENODEV;
+ return NULL;
+ }
+
+ adapter = NULL;
+ iterator = 0;
+
+ list_for_each_entry(adapter, &adapters_list_g, list) {
+ if (iterator++ == adapno) break;
+ }
+
+ if (!adapter) {
+ *rval = -ENODEV;
+ return NULL;
+ }
+
+ return adapter;
+}
+
+/**
+ * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
+ * @arg : packet sent by the user app
+ * @old_ioctl : mimd if 1; uioc otherwise
+ * @rval : pointer for command's returned value (not function status)
+ */
+static int
+handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
+{
+ mimd_t __user *umimd;
+ mimd_t kmimd;
+ uint8_t opcode;
+ uint8_t subopcode;
+
+ if (old_ioctl)
+ goto old_packet;
+ else
+ goto new_packet;
+
+new_packet:
+ return (-ENOTSUPP);
+
+old_packet:
+ *rval = 0;
+ umimd = arg;
+
+ if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
+ return (-EFAULT);
+
+ opcode = kmimd.ui.fcs.opcode;
+ subopcode = kmimd.ui.fcs.subopcode;
+
+ /*
+	 * If the opcode is 0x82 and the subopcode is either MEGAIOC_QDRVRVER
+	 * or MEGAIOC_QNADAP, then we can handle it. Otherwise we should
+	 * return 1 to indicate that we cannot handle this.
+ */
+ if (opcode != 0x82)
+ return 1;
+
+ switch (subopcode) {
+
+ case MEGAIOC_QDRVRVER:
+
+ if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
+ return (-EFAULT);
+
+ return 0;
+
+ case MEGAIOC_QNADAP:
+
+ *rval = adapters_count_g;
+
+ if (copy_to_user(kmimd.data, &adapters_count_g,
+ sizeof(uint32_t)))
+ return (-EFAULT);
+
+ return 0;
+
+ default:
+ /* cannot handle */
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * mimd_to_kioc - Converter from old to new ioctl format
+ * @umimd : user space old MIMD IOCTL
+ * @adp : adapter softstate
+ * @kioc : kernel space new format IOCTL
+ *
+ * Routine to convert a MIMD interface IOCTL to a new interface IOCTL packet.
+ * The new packet is in kernel space so that the driver can perform operations
+ * on it freely.
+ */
+
+static int
+mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
+{
+ mbox64_t *mbox64;
+ mbox_t *mbox;
+ mraid_passthru_t *pthru32;
+ uint32_t adapno;
+ uint8_t opcode;
+ uint8_t subopcode;
+ mimd_t mimd;
+
+ if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
+ return (-EFAULT);
+
+ /*
+ * Applications are not allowed to send extd pthru
+ */
+ if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
+ (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
+ return (-EINVAL);
+
+ opcode = mimd.ui.fcs.opcode;
+ subopcode = mimd.ui.fcs.subopcode;
+ adapno = GETADAP(mimd.ui.fcs.adapno);
+
+ if (adapno >= adapters_count_g)
+ return (-ENODEV);
+
+ kioc->adapno = adapno;
+ kioc->mb_type = MBOX_LEGACY;
+ kioc->app_type = APPTYPE_MIMD;
+
+ switch (opcode) {
+
+ case 0x82:
+
+ if (subopcode == MEGAIOC_QADAPINFO) {
+
+ kioc->opcode = GET_ADAP_INFO;
+ kioc->data_dir = UIOC_RD;
+ kioc->xferlen = sizeof(mraid_hba_info_t);
+
+ if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+ return (-ENOMEM);
+ }
+ else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: Invalid subop\n"));
+ return (-EINVAL);
+ }
+
+ break;
+
+ case 0x81:
+
+ kioc->opcode = MBOX_CMD;
+ kioc->xferlen = mimd.ui.fcs.length;
+ kioc->user_data_len = kioc->xferlen;
+ kioc->user_data = mimd.ui.fcs.buffer;
+
+ if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+ return (-ENOMEM);
+
+ if (mimd.outlen) kioc->data_dir = UIOC_RD;
+ if (mimd.inlen) kioc->data_dir |= UIOC_WR;
+
+ break;
+
+ case 0x80:
+
+ kioc->opcode = MBOX_CMD;
+ kioc->xferlen = (mimd.outlen > mimd.inlen) ?
+ mimd.outlen : mimd.inlen;
+ kioc->user_data_len = kioc->xferlen;
+ kioc->user_data = mimd.data;
+
+ if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+ return (-ENOMEM);
+
+ if (mimd.outlen) kioc->data_dir = UIOC_RD;
+ if (mimd.inlen) kioc->data_dir |= UIOC_WR;
+
+ break;
+
+ default:
+ return (-EINVAL);
+ }
+
+ /*
+ * If driver command, nothing else to do
+ */
+ if (opcode == 0x82)
+ return 0;
+
+ /*
+ * This is a mailbox cmd; copy the mailbox from mimd
+ */
+ mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
+ mbox = &mbox64->mbox32;
+ memcpy(mbox, mimd.mbox, 14);
+
+ if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD
+
+ mbox->xferaddr = (uint32_t)kioc->buf_paddr;
+
+ if (kioc->data_dir & UIOC_WR) {
+ if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
+ kioc->xferlen)) {
+ return (-EFAULT);
+ }
+ }
+
+ return 0;
+ }
+
+ /*
+ * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
+	 * Just like in the above case, the beginning of the memblk is treated
+	 * as a mailbox. The passthru begins at the next 1K boundary, and the
+	 * data starts 1K after that.
+ */
+ pthru32 = kioc->pthru32;
+ kioc->user_pthru = &umimd->pthru;
+ mbox->xferaddr = (uint32_t)kioc->pthru32_h;
+
+ if (copy_from_user(pthru32, kioc->user_pthru,
+ sizeof(mraid_passthru_t))) {
+ return (-EFAULT);
+ }
+
+ pthru32->dataxferaddr = kioc->buf_paddr;
+ if (kioc->data_dir & UIOC_WR) {
+ if (pthru32->dataxferlen > kioc->xferlen)
+ return -EINVAL;
+ if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
+ pthru32->dataxferlen)) {
+ return (-EFAULT);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mraid_mm_attach_buf - Attach a free dma buffer for required size
+ * @adp : Adapter softstate
+ * @kioc : kioc that the buffer needs to be attached to
+ * @xferlen : required length for buffer
+ *
+ * First we search for a pool with the smallest buffer that is >= @xferlen. If
+ * that pool has no free buffer, we will try the next bigger size. If none
+ * is available, we will try to allocate the smallest buffer that is >=
+ * @xferlen and attach it to the pool.
+ */
+static int
+mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
+{
+ mm_dmapool_t *pool;
+ int right_pool = -1;
+ unsigned long flags;
+ int i;
+
+ kioc->pool_index = -1;
+ kioc->buf_vaddr = NULL;
+ kioc->buf_paddr = 0;
+ kioc->free_buf = 0;
+
+ /*
+ * We need xferlen amount of memory. See if we can get it from our
+	 * dma pools. If we don't get the exact size, we will try a bigger buffer
+ */
+
+ for (i = 0; i < MAX_DMA_POOLS; i++) {
+
+ pool = &adp->dma_pool_list[i];
+
+ if (xferlen > pool->buf_size)
+ continue;
+
+ if (right_pool == -1)
+ right_pool = i;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!pool->in_use) {
+
+ pool->in_use = 1;
+ kioc->pool_index = i;
+ kioc->buf_vaddr = pool->vaddr;
+ kioc->buf_paddr = pool->paddr;
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return 0;
+ }
+ else {
+ spin_unlock_irqrestore(&pool->lock, flags);
+ continue;
+ }
+ }
+
+	/*
+	 * If xferlen doesn't fit into any of our pools, return an error
+	 */
+ if (right_pool == -1)
+ return -EINVAL;
+
+	/*
+	 * We did not get any buffer from the preallocated pool. Let us try
+	 * to allocate a new buffer. NOTE: This is a blocking call.
+	 */
+ pool = &adp->dma_pool_list[right_pool];
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ kioc->pool_index = right_pool;
+ kioc->free_buf = 1;
+ kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+ &kioc->buf_paddr);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (!kioc->buf_vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * mraid_mm_alloc_kioc - Returns a uioc_t from free list
+ * @adp : Adapter softstate for this module
+ *
+ * The kioc_semaphore is initialized with the number of kioc nodes in the
+ * free kioc pool. If the kioc pool is empty, this function blocks until
+ * a kioc becomes free.
+ */
+static uioc_t *
+mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
+{
+ uioc_t *kioc;
+ struct list_head* head;
+ unsigned long flags;
+
+ down(&adp->kioc_semaphore);
+
+ spin_lock_irqsave(&adp->kioc_pool_lock, flags);
+
+ head = &adp->kioc_pool;
+
+ if (list_empty(head)) {
+ up(&adp->kioc_semaphore);
+ spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+ con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
+ return NULL;
+ }
+
+ kioc = list_entry(head->next, uioc_t, list);
+ list_del_init(&kioc->list);
+
+ spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+ memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
+ memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
+
+ kioc->buf_vaddr = NULL;
+ kioc->buf_paddr = 0;
+	kioc->pool_index = -1;
+ kioc->free_buf = 0;
+ kioc->user_data = NULL;
+ kioc->user_data_len = 0;
+ kioc->user_pthru = NULL;
+ kioc->timedout = 0;
+
+ return kioc;
+}
+
+/**
+ * mraid_mm_dealloc_kioc - Return kioc to free pool
+ * @adp : Adapter softstate
+ * @kioc : uioc_t node to be returned to free pool
+ */
+static void
+mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
+{
+ mm_dmapool_t *pool;
+ unsigned long flags;
+
+ if (kioc->pool_index != -1) {
+ pool = &adp->dma_pool_list[kioc->pool_index];
+
+ /* This routine may be called in non-isr context also */
+ spin_lock_irqsave(&pool->lock, flags);
+
+		/*
+		 * While attaching the dma buffer, if we didn't get the
+		 * required buffer from the pool, we would have allocated
+		 * it at run time and set the free_buf flag. We must free
+		 * that buffer. Otherwise, just mark the buffer as not in
+		 * use.
+		 */
+ if (kioc->free_buf == 1)
+ pci_pool_free(pool->handle, kioc->buf_vaddr,
+ kioc->buf_paddr);
+ else
+ pool->in_use = 0;
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ }
+
+ /* Return the kioc to the free pool */
+ spin_lock_irqsave(&adp->kioc_pool_lock, flags);
+ list_add(&kioc->list, &adp->kioc_pool);
+ spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+ /* increment the free kioc count */
+ up(&adp->kioc_semaphore);
+
+ return;
+}
+
+/**
+ * lld_ioctl - Routine to issue ioctl to low level drvr
+ * @adp : The adapter handle
+ * @kioc : The ioctl packet with kernel addresses
+ */
+static int
+lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
+{
+ int rval;
+ struct timer_list timer;
+ struct timer_list *tp = NULL;
+
+ kioc->status = -ENODATA;
+ rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
+
+ if (rval) return rval;
+
+ /*
+ * Start the timer
+ */
+ if (adp->timeout > 0) {
+ tp = &timer;
+ init_timer(tp);
+
+ tp->function = lld_timedout;
+ tp->data = (unsigned long)kioc;
+ tp->expires = jiffies + adp->timeout * HZ;
+
+ add_timer(tp);
+ }
+
+	/*
+	 * Wait until the low level driver completes the ioctl. After this
+	 * call, the ioctl has either completed successfully or timed out.
+	 */
+ wait_event(wait_q, (kioc->status != -ENODATA));
+ if (tp) {
+ del_timer_sync(tp);
+ }
+
+	/*
+	 * If the command timed out, we mark the controller offline
+	 * before returning
+	 */
+ if (kioc->timedout) {
+ adp->quiescent = 0;
+ }
+
+ return kioc->status;
+}
+
+
+/**
+ * ioctl_done - callback from the low level driver
+ * @kioc : completed ioctl packet
+ */
+static void
+ioctl_done(uioc_t *kioc)
+{
+ uint32_t adapno;
+ int iterator;
+ mraid_mmadp_t* adapter;
+
+	/*
+	 * When the kioc returns from the driver, make sure its status is
+	 * no longer ENODATA. Otherwise, the driver will hang on wait_event
+	 * forever.
+	 */
+ if (kioc->status == -ENODATA) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: lld didn't change status!\n"));
+
+ kioc->status = -EINVAL;
+ }
+
+	/*
+	 * Check if this kioc timed out earlier. If so, nobody is waiting
+	 * on it and there is no one to wake up; we just have to free the
+	 * kioc.
+	 */
+ if (kioc->timedout) {
+ iterator = 0;
+ adapter = NULL;
+ adapno = kioc->adapno;
+
+ con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
+ "ioctl that was timedout before\n"));
+
+ list_for_each_entry(adapter, &adapters_list_g, list) {
+ if (iterator++ == adapno) break;
+ }
+
+ kioc->timedout = 0;
+
+ if (adapter) {
+ mraid_mm_dealloc_kioc( adapter, kioc );
+ }
+ }
+ else {
+ wake_up(&wait_q);
+ }
+}
+
+
+/**
+ * lld_timedout - callback from the expired timer
+ * @ptr : ioctl packet that timed out
+ */
+static void
+lld_timedout(unsigned long ptr)
+{
+ uioc_t *kioc = (uioc_t *)ptr;
+
+ kioc->status = -ETIME;
+ kioc->timedout = 1;
+
+ con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
+
+ wake_up(&wait_q);
+}
+
+
+/**
+ * kioc_to_mimd - Converter from new back to old format
+ * @kioc : Kernel space IOCTL packet (successfully issued)
+ * @mimd : User space MIMD packet
+ */
+static int
+kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
+{
+ mimd_t kmimd;
+ uint8_t opcode;
+ uint8_t subopcode;
+
+ mbox64_t *mbox64;
+ mraid_passthru_t __user *upthru32;
+ mraid_passthru_t *kpthru32;
+ mcontroller_t cinfo;
+ mraid_hba_info_t *hinfo;
+
+
+ if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
+ return (-EFAULT);
+
+ opcode = kmimd.ui.fcs.opcode;
+ subopcode = kmimd.ui.fcs.subopcode;
+
+ if (opcode == 0x82) {
+ switch (subopcode) {
+
+ case MEGAIOC_QADAPINFO:
+
+ hinfo = (mraid_hba_info_t *)(unsigned long)
+ kioc->buf_vaddr;
+
+ hinfo_to_cinfo(hinfo, &cinfo);
+
+ if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
+ return (-EFAULT);
+
+ return 0;
+
+ default:
+ return (-EINVAL);
+ }
+
+ return 0;
+ }
+
+ mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
+
+ if (kioc->user_pthru) {
+
+ upthru32 = kioc->user_pthru;
+ kpthru32 = kioc->pthru32;
+
+ if (copy_to_user(&upthru32->scsistatus,
+ &kpthru32->scsistatus,
+ sizeof(uint8_t))) {
+ return (-EFAULT);
+ }
+ }
+
+ if (kioc->user_data) {
+ if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
+ kioc->user_data_len)) {
+ return (-EFAULT);
+ }
+ }
+
+ if (copy_to_user(&mimd->mbox[17],
+ &mbox64->mbox32.status, sizeof(uint8_t))) {
+ return (-EFAULT);
+ }
+
+ return 0;
+}
+
+
+/**
+ * hinfo_to_cinfo - Convert new format hba info into old format
+ * @hinfo : New format, more comprehensive adapter info
+ * @cinfo : Old format adapter info to support mimd_t apps
+ */
+static void
+hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
+{
+ if (!hinfo || !cinfo)
+ return;
+
+ cinfo->base = hinfo->baseport;
+ cinfo->irq = hinfo->irq;
+ cinfo->numldrv = hinfo->num_ldrv;
+ cinfo->pcibus = hinfo->pci_bus;
+ cinfo->pcidev = hinfo->pci_slot;
+ cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
+ cinfo->pciid = hinfo->pci_device_id;
+ cinfo->pcivendor = hinfo->pci_vendor_id;
+ cinfo->pcislot = hinfo->pci_slot;
+ cinfo->uid = hinfo->unique_id;
+}
+
+
+/**
+ * mraid_mm_register_adp - Registration routine for low level drivers
+ * @lld_adp : Adapter object
+ */
+int
+mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
+{
+ mraid_mmadp_t *adapter;
+ mbox64_t *mbox_list;
+ uioc_t *kioc;
+ uint32_t rval;
+ int i;
+
+
+ if (lld_adp->drvr_type != DRVRTYPE_MBOX)
+ return (-EINVAL);
+
+ adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
+
+ if (!adapter)
+ return -ENOMEM;
+
+
+ adapter->unique_id = lld_adp->unique_id;
+ adapter->drvr_type = lld_adp->drvr_type;
+ adapter->drvr_data = lld_adp->drvr_data;
+ adapter->pdev = lld_adp->pdev;
+ adapter->issue_uioc = lld_adp->issue_uioc;
+ adapter->timeout = lld_adp->timeout;
+ adapter->max_kioc = lld_adp->max_kioc;
+ adapter->quiescent = 1;
+
+ /*
+ * Allocate single blocks of memory for all required kiocs,
+ * mailboxes and passthru structures.
+ */
+ adapter->kioc_list = kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
+ GFP_KERNEL);
+ adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
+ GFP_KERNEL);
+ adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
+ adapter->pdev,
+ sizeof(mraid_passthru_t),
+ 16, 0);
+
+ if (!adapter->kioc_list || !adapter->mbox_list ||
+ !adapter->pthru_dma_pool) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: out of memory, %s %d\n", __func__,
+ __LINE__));
+
+ rval = (-ENOMEM);
+
+ goto memalloc_error;
+ }
+
+ /*
+	 * Slice kioc_list and make a kioc_pool with the individual kiocs
+ */
+ INIT_LIST_HEAD(&adapter->kioc_pool);
+ spin_lock_init(&adapter->kioc_pool_lock);
+ sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
+
+ mbox_list = (mbox64_t *)adapter->mbox_list;
+
+ for (i = 0; i < lld_adp->max_kioc; i++) {
+
+ kioc = adapter->kioc_list + i;
+ kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
+ kioc->pthru32 = pci_pool_alloc(adapter->pthru_dma_pool,
+ GFP_KERNEL, &kioc->pthru32_h);
+
+ if (!kioc->pthru32) {
+
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid cmm: out of memory, %s %d\n",
+ __func__, __LINE__));
+
+ rval = (-ENOMEM);
+
+ goto pthru_dma_pool_error;
+ }
+
+ list_add_tail(&kioc->list, &adapter->kioc_pool);
+ }
+
+	// Set up the dma pools for data buffers
+ if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
+ goto dma_pool_error;
+ }
+
+ list_add_tail(&adapter->list, &adapters_list_g);
+
+ adapters_count_g++;
+
+ return 0;
+
+dma_pool_error:
+ /* Do nothing */
+
+pthru_dma_pool_error:
+
+ for (i = 0; i < lld_adp->max_kioc; i++) {
+ kioc = adapter->kioc_list + i;
+ if (kioc->pthru32) {
+ pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
+ kioc->pthru32_h);
+ }
+ }
+
+memalloc_error:
+
+ kfree(adapter->kioc_list);
+ kfree(adapter->mbox_list);
+
+ if (adapter->pthru_dma_pool)
+ pci_pool_destroy(adapter->pthru_dma_pool);
+
+ kfree(adapter);
+
+ return rval;
+}
+
+
+/**
+ * mraid_mm_adapter_app_handle - return the application handle for this adapter
+ * @unique_id : adapter unique identifier
+ *
+ * For the given driver data, locate the adapter in our global list and
+ * return the corresponding handle, which is also used by applications to
+ * uniquely identify an adapter.
+ *
+ * Return the adapter handle if it is found in the list.
+ * Return 0 if the adapter could not be located; this should never happen.
+ */
+uint32_t
+mraid_mm_adapter_app_handle(uint32_t unique_id)
+{
+ mraid_mmadp_t *adapter;
+ mraid_mmadp_t *tmp;
+ int index = 0;
+
+ list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
+
+ if (adapter->unique_id == unique_id) {
+
+ return MKADAP(index);
+ }
+
+ index++;
+ }
+
+ return 0;
+}
+
+
+/**
+ * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
+ * @adp : Adapter softstate
+ *
+ * We maintain a set of dma buffer pools per adapter. Each pool has one
+ * buffer; e.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
+ * buffers. There is just one 4k buffer in the 4k pool, one 8k buffer in
+ * the 8k pool, etc. We don't want to waste too much memory by allocating
+ * more buffers per pool.
+ */
+static int
+mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
+{
+ mm_dmapool_t *pool;
+ int bufsize;
+ int i;
+
+ /*
+ * Create MAX_DMA_POOLS number of pools
+ */
+ bufsize = MRAID_MM_INIT_BUFF_SIZE;
+
+ for (i = 0; i < MAX_DMA_POOLS; i++){
+
+ pool = &adp->dma_pool_list[i];
+
+ pool->buf_size = bufsize;
+ spin_lock_init(&pool->lock);
+
+ pool->handle = pci_pool_create("megaraid mm data buffer",
+ adp->pdev, bufsize, 16, 0);
+
+ if (!pool->handle) {
+ goto dma_pool_setup_error;
+ }
+
+ pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+ &pool->paddr);
+
+ if (!pool->vaddr)
+ goto dma_pool_setup_error;
+
+ bufsize = bufsize * 2;
+ }
+
+ return 0;
+
+dma_pool_setup_error:
+
+ mraid_mm_teardown_dma_pools(adp);
+ return (-ENOMEM);
+}
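With MRAID_MM_INIT_BUFF_SIZE at 4096 and the doubling in the loop above, the pools come out at 4k, 8k, 16k, 32k and 64k, matching the example in the function comment. A one-line sketch of the progression (the helper name is hypothetical):

	/* Buffer size of pool i, following the doubling loop above. */
	static int pool_buf_size(int i)
	{
		return MRAID_MM_INIT_BUFF_SIZE << i;	/* 4k, 8k, ... 64k */
	}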
+
+
+/**
+ * mraid_mm_unregister_adp - Unregister routine for low level drivers
+ * @unique_id : UID of the adapter
+ *
+ * Assumes no outstanding ioctls to llds.
+ */
+int
+mraid_mm_unregister_adp(uint32_t unique_id)
+{
+ mraid_mmadp_t *adapter;
+ mraid_mmadp_t *tmp;
+
+ list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
+
+
+ if (adapter->unique_id == unique_id) {
+
+ adapters_count_g--;
+
+ list_del_init(&adapter->list);
+
+ mraid_mm_free_adp_resources(adapter);
+
+ kfree(adapter);
+
+ con_log(CL_ANN, (
+ "megaraid cmm: Unregistered one adapter:%#x\n",
+ unique_id));
+
+ return 0;
+ }
+ }
+
+ return (-ENODEV);
+}
+
+/**
+ * mraid_mm_free_adp_resources - Free adapter softstate
+ * @adp : Adapter softstate
+ */
+static void
+mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
+{
+ uioc_t *kioc;
+ int i;
+
+ mraid_mm_teardown_dma_pools(adp);
+
+ for (i = 0; i < adp->max_kioc; i++) {
+
+ kioc = adp->kioc_list + i;
+
+ pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
+ kioc->pthru32_h);
+ }
+
+ kfree(adp->kioc_list);
+ kfree(adp->mbox_list);
+
+ pci_pool_destroy(adp->pthru_dma_pool);
+
+
+ return;
+}
+
+
+/**
+ * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
+ * @adp : Adapter softstate
+ */
+static void
+mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
+{
+ int i;
+ mm_dmapool_t *pool;
+
+ for (i = 0; i < MAX_DMA_POOLS; i++) {
+
+ pool = &adp->dma_pool_list[i];
+
+ if (pool->handle) {
+
+ if (pool->vaddr)
+ pci_pool_free(pool->handle, pool->vaddr,
+ pool->paddr);
+
+ pci_pool_destroy(pool->handle);
+ pool->handle = NULL;
+ }
+ }
+
+ return;
+}
+
+/**
+ * mraid_mm_init - Module entry point
+ */
+static int __init
+mraid_mm_init(void)
+{
+ int err;
+
+ // Announce the driver version
+ con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
+ LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
+
+ err = misc_register(&megaraid_mm_dev);
+ if (err < 0) {
+ con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
+ return err;
+ }
+
+ init_waitqueue_head(&wait_q);
+
+ INIT_LIST_HEAD(&adapters_list_g);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_COMPAT
+/**
+ * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
+ * @filep : file operations pointer (ignored)
+ * @cmd : ioctl command
+ * @arg : user ioctl packet
+ */
+static long
+mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ err = mraid_mm_ioctl(filep, cmd, arg);
+
+ return err;
+}
+#endif
+
+/**
+ * mraid_mm_exit - Module exit point
+ */
+static void __exit
+mraid_mm_exit(void)
+{
+	con_log(CL_DLEVEL1, ("exiting common mod\n"));
+
+ misc_deregister(&megaraid_mm_dev);
+}
+
+module_init(mraid_mm_init);
+module_exit(mraid_mm_exit);
+
+/* vi: set ts=8 sw=8 tw=78: */
diff --git a/kernel/drivers/scsi/megaraid/megaraid_mm.h b/kernel/drivers/scsi/megaraid/megaraid_mm.h
new file mode 100644
index 000000000..55b425c0a
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_mm.h
@@ -0,0 +1,101 @@
+/*
+ *
+ * Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * FILE : megaraid_mm.h
+ */
+
+#ifndef MEGARAID_MM_H
+#define MEGARAID_MM_H
+
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+
+#include "mbox_defs.h"
+#include "megaraid_ioctl.h"
+
+
+#define LSI_COMMON_MOD_VERSION "2.20.2.7"
+#define LSI_COMMON_MOD_EXT_VERSION \
+ "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
+
+
+#define LSI_DBGLVL dbglevel
+
+// The smallest dma pool
+#define MRAID_MM_INIT_BUFF_SIZE 4096
+
+/**
+ * mimd_t : Old style ioctl packet structure (deprecated)
+ *
+ * @inlen :
+ * @outlen :
+ * @fca :
+ * @opcode :
+ * @subopcode :
+ * @adapno :
+ * @buffer :
+ * @pad :
+ * @length :
+ * @mbox :
+ * @pthru :
+ * @data :
+ * @pad :
+ *
+ * Note : This structure is DEPRECATED. New applications must use
+ * : uioc_t structure instead. All new hba drivers use the new
+ * : format. If we get a mimd packet, we will convert it into the
+ * : new uioc_t format and send it to the hba drivers.
+ */
+
+typedef struct mimd {
+
+ uint32_t inlen;
+ uint32_t outlen;
+
+ union {
+ uint8_t fca[16];
+ struct {
+ uint8_t opcode;
+ uint8_t subopcode;
+ uint16_t adapno;
+#if BITS_PER_LONG == 32
+ uint8_t __user *buffer;
+ uint8_t pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ uint8_t __user *buffer;
+#endif
+ uint32_t length;
+ } __attribute__ ((packed)) fcs;
+ } __attribute__ ((packed)) ui;
+
+ uint8_t mbox[18]; /* 16 bytes + 2 status bytes */
+ mraid_passthru_t pthru;
+
+#if BITS_PER_LONG == 32
+ char __user *data; /* buffer <= 4096 for 0x80 commands */
+ char pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ char __user *data;
+#endif
+
+} __attribute__ ((packed))mimd_t;
+
+#endif // MEGARAID_MM_H
+
+// vi: set ts=8 sw=8 tw=78:
diff --git a/kernel/drivers/scsi/megaraid/megaraid_sas.h b/kernel/drivers/scsi/megaraid/megaraid_sas.h
new file mode 100644
index 000000000..14e5c7cea
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_sas.h
@@ -0,0 +1,1990 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas.h
+ *
+ * Authors: Avago Technologies
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#ifndef LSI_MEGARAID_SAS_H
+#define LSI_MEGARAID_SAS_H
+
+/*
+ * MegaRAID SAS Driver meta data
+ */
+#define MEGASAS_VERSION "06.806.08.00-rc1"
+
+/*
+ * Device IDs
+ */
+#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060
+#define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C
+#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
+#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078
+#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079
+#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
+#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
+#define PCI_DEVICE_ID_LSI_FUSION 0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA 0x002f
+#define PCI_DEVICE_ID_LSI_INVADER 0x005d
+#define PCI_DEVICE_ID_LSI_FURY 0x005f
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MEGARAID_INTEL_RS3DC080_SSDID 0x9360
+#define MEGARAID_INTEL_RS3DC040_SSDID 0x9362
+#define MEGARAID_INTEL_RS3SC008_SSDID 0x9380
+#define MEGARAID_INTEL_RS3MC044_SSDID 0x9381
+#define MEGARAID_INTEL_RS3WC080_SSDID 0x9341
+#define MEGARAID_INTEL_RS3WC040_SSDID 0x9343
+
+/*
+ * Intel HBA branding
+ */
+#define MEGARAID_INTEL_RS3DC080_BRANDING \
+ "Intel(R) RAID Controller RS3DC080"
+#define MEGARAID_INTEL_RS3DC040_BRANDING \
+ "Intel(R) RAID Controller RS3DC040"
+#define MEGARAID_INTEL_RS3SC008_BRANDING \
+ "Intel(R) RAID Controller RS3SC008"
+#define MEGARAID_INTEL_RS3MC044_BRANDING \
+ "Intel(R) RAID Controller RS3MC044"
+#define MEGARAID_INTEL_RS3WC080_BRANDING \
+ "Intel(R) RAID Controller RS3WC080"
+#define MEGARAID_INTEL_RS3WC040_BRANDING \
+ "Intel(R) RAID Controller RS3WC040"
+
+/*
+ * =====================================
+ * MegaRAID SAS MFI firmware definitions
+ * =====================================
+ */
+
+/*
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for the
+ * protocol between the software and the firmware. Commands are issued using
+ * "message frames".
+ */
+
+/*
+ * FW posts its state in the upper 4 bits of the outbound_msg_0 register
+ */
+#define MFI_STATE_MASK 0xF0000000
+#define MFI_STATE_UNDEFINED 0x00000000
+#define MFI_STATE_BB_INIT 0x10000000
+#define MFI_STATE_FW_INIT 0x40000000
+#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
+#define MFI_STATE_FW_INIT_2 0x70000000
+#define MFI_STATE_DEVICE_SCAN 0x80000000
+#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000
+#define MFI_STATE_FLUSH_CACHE 0xA0000000
+#define MFI_STATE_READY 0xB0000000
+#define MFI_STATE_OPERATIONAL 0xC0000000
+#define MFI_STATE_FAULT 0xF0000000
+#define MFI_STATE_FORCE_OCR 0x00000080
+#define MFI_STATE_DMADONE 0x00000008
+#define MFI_STATE_CRASH_DUMP_DONE 0x00000004
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
+#define MEGAMFI_FRAME_SIZE 64
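As a concrete illustration of the masking described above, a state check looks roughly like this (a sketch: the helper name is hypothetical, and the raw value is assumed to come from a readl() of the FW status register appropriate to the controller):

	/* Sketch: classify firmware state from a raw status register value. */
	static int mfi_fw_is_ready(u32 status_reg)
	{
		u32 state = status_reg & MFI_STATE_MASK;	/* upper 4 bits */

		return state == MFI_STATE_READY;
	}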
+
+/*
+ * During FW init, clear pending cmds & reset state using inbound_msg_0
+ *
+ * ABORT : Abort all pending cmds
+ * READY : Move from OPERATIONAL to READY state; discard queue info
+ * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
+ * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
+ * HOTPLUG : Resume from Hotplug
+ * MFI_STOP_ADP : Send signal to FW to stop processing
+ */
+#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
+#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
+#define DIAG_WRITE_ENABLE (0x00000080)
+#define DIAG_RESET_ADAPTER (0x00000004)
+
+#define MFI_ADP_RESET 0x00000040
+#define MFI_INIT_ABORT 0x00000001
+#define MFI_INIT_READY 0x00000002
+#define MFI_INIT_MFIMODE 0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
+#define MFI_INIT_HOTPLUG 0x00000010
+#define MFI_STOP_ADP 0x00000020
+#define MFI_RESET_FLAGS	(MFI_INIT_READY | \
+				 MFI_INIT_MFIMODE | \
+				 MFI_INIT_ABORT)
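For illustration, the combined flags are written to inbound_msg_0 to abort pending commands and bring the firmware back to READY. A minimal sketch, assuming a mapped megasas_register_set; this is not the driver's actual transition-to-ready path:

	/* Sketch: ask FW to abort pending cmds and return to READY. */
	writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_msg_0);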
+
+/*
+ * MFI frame flags
+ */
+#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
+#define MFI_FRAME_SGL32 0x0000
+#define MFI_FRAME_SGL64 0x0002
+#define MFI_FRAME_SENSE32 0x0000
+#define MFI_FRAME_SENSE64 0x0004
+#define MFI_FRAME_DIR_NONE 0x0000
+#define MFI_FRAME_DIR_WRITE 0x0008
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_FRAME_DIR_BOTH 0x0018
+#define MFI_FRAME_IEEE 0x0020
+
+/*
+ * Definition for cmd_status
+ */
+#define MFI_CMD_STATUS_POLL_MODE 0xFF
+
+/*
+ * MFI command opcodes
+ */
+#define MFI_CMD_INIT 0x00
+#define MFI_CMD_LD_READ 0x01
+#define MFI_CMD_LD_WRITE 0x02
+#define MFI_CMD_LD_SCSI_IO 0x03
+#define MFI_CMD_PD_SCSI_IO 0x04
+#define MFI_CMD_DCMD 0x05
+#define MFI_CMD_ABORT 0x06
+#define MFI_CMD_SMP 0x07
+#define MFI_CMD_STP 0x08
+#define MFI_CMD_INVALID 0xff
+
+#define MR_DCMD_CTRL_GET_INFO 0x01010000
+#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_LD_LIST_QUERY 0x03010100
+
+#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
+#define MR_FLUSH_CTRL_CACHE 0x01
+#define MR_FLUSH_DISK_CACHE 0x02
+
+#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
+#define MR_ENABLE_DRIVE_SPINDOWN 0x01
+
+#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
+#define MR_DCMD_CTRL_EVENT_GET 0x01040300
+#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
+#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
+
+#define MR_DCMD_CLUSTER 0x08000000
+#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
+#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
+#define MR_DCMD_PD_LIST_QUERY 0x02010100
+
+#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100
+#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600)
+
+/*
+ * Global functions
+ */
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+
+
+/*
+ * MFI command completion codes
+ */
+enum MFI_STAT {
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD = 0x01,
+ MFI_STAT_INVALID_DCMD = 0x02,
+ MFI_STAT_INVALID_PARAMETER = 0x03,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+ MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+ MFI_STAT_APP_IN_USE = 0x07,
+ MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+ MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+ MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+ MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+ MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+ MFI_STAT_FLASH_BUSY = 0x0f,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+ MFI_STAT_FLASH_NOT_OPEN = 0x13,
+ MFI_STAT_FLASH_NOT_STARTED = 0x14,
+ MFI_STAT_FLUSH_FAILED = 0x15,
+ MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+ MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+ MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+ MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+ MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+ MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+ MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+ MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+ MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR = 0x21,
+ MFI_STAT_NO_HW_PRESENT = 0x22,
+ MFI_STAT_NOT_FOUND = 0x23,
+ MFI_STAT_NOT_IN_ENCL = 0x24,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+ MFI_STAT_PD_TYPE_WRONG = 0x26,
+ MFI_STAT_PR_DISABLED = 0x27,
+ MFI_STAT_ROW_INDEX_INVALID = 0x28,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+ MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+ MFI_STAT_SCSI_IO_FAILED = 0x2e,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET = 0x31,
+ MFI_STAT_WRONG_STATE = 0x32,
+ MFI_STAT_LD_OFFLINE = 0x33,
+ MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
+ MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
+ MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
+ MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
+ MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+ MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
+
+ MFI_STAT_INVALID_STATUS = 0xFF
+};
+
+/*
+ * Crash dump related defines
+ */
+#define MAX_CRASH_DUMP_SIZE 512
+#define CRASH_DMA_BUF_SIZE (1024 * 1024)
+
+enum MR_FW_CRASH_DUMP_STATE {
+ UNAVAILABLE = 0,
+ AVAILABLE = 1,
+ COPYING = 2,
+ COPIED = 3,
+ COPY_ERROR = 4,
+};
+
+enum _MR_CRASH_BUF_STATUS {
+ MR_CRASH_BUF_TURN_OFF = 0,
+ MR_CRASH_BUF_TURN_ON = 1,
+};
+
+/*
+ * Number of mailbox bytes in DCMD message frame
+ */
+#define MFI_MBOX_SIZE 12
+
+enum MR_EVT_CLASS {
+
+ MR_EVT_CLASS_DEBUG = -2,
+ MR_EVT_CLASS_PROGRESS = -1,
+ MR_EVT_CLASS_INFO = 0,
+ MR_EVT_CLASS_WARNING = 1,
+ MR_EVT_CLASS_CRITICAL = 2,
+ MR_EVT_CLASS_FATAL = 3,
+ MR_EVT_CLASS_DEAD = 4,
+
+};
+
+enum MR_EVT_LOCALE {
+
+ MR_EVT_LOCALE_LD = 0x0001,
+ MR_EVT_LOCALE_PD = 0x0002,
+ MR_EVT_LOCALE_ENCL = 0x0004,
+ MR_EVT_LOCALE_BBU = 0x0008,
+ MR_EVT_LOCALE_SAS = 0x0010,
+ MR_EVT_LOCALE_CTRL = 0x0020,
+ MR_EVT_LOCALE_CONFIG = 0x0040,
+ MR_EVT_LOCALE_CLUSTER = 0x0080,
+ MR_EVT_LOCALE_ALL = 0xffff,
+
+};
+
+enum MR_EVT_ARGS {
+
+ MR_EVT_ARGS_NONE,
+ MR_EVT_ARGS_CDB_SENSE,
+ MR_EVT_ARGS_LD,
+ MR_EVT_ARGS_LD_COUNT,
+ MR_EVT_ARGS_LD_LBA,
+ MR_EVT_ARGS_LD_OWNER,
+ MR_EVT_ARGS_LD_LBA_PD_LBA,
+ MR_EVT_ARGS_LD_PROG,
+ MR_EVT_ARGS_LD_STATE,
+ MR_EVT_ARGS_LD_STRIP,
+ MR_EVT_ARGS_PD,
+ MR_EVT_ARGS_PD_ERR,
+ MR_EVT_ARGS_PD_LBA,
+ MR_EVT_ARGS_PD_LBA_LD,
+ MR_EVT_ARGS_PD_PROG,
+ MR_EVT_ARGS_PD_STATE,
+ MR_EVT_ARGS_PCI,
+ MR_EVT_ARGS_RATE,
+ MR_EVT_ARGS_STR,
+ MR_EVT_ARGS_TIME,
+ MR_EVT_ARGS_ECC,
+ MR_EVT_ARGS_LD_PROP,
+ MR_EVT_ARGS_PD_SPARE,
+ MR_EVT_ARGS_PD_INDEX,
+ MR_EVT_ARGS_DIAG_PASS,
+ MR_EVT_ARGS_DIAG_FAIL,
+ MR_EVT_ARGS_PD_LBA_LBA,
+ MR_EVT_ARGS_PORT_PHY,
+ MR_EVT_ARGS_PD_MISSING,
+ MR_EVT_ARGS_PD_ADDRESS,
+ MR_EVT_ARGS_BITMAP,
+ MR_EVT_ARGS_CONNECTOR,
+ MR_EVT_ARGS_PD_PD,
+ MR_EVT_ARGS_PD_FRU,
+ MR_EVT_ARGS_PD_PATHINFO,
+ MR_EVT_ARGS_PD_POWER_STATE,
+ MR_EVT_ARGS_GENERIC,
+};
+
+/*
+ * define constants for device list query options
+ */
+enum MR_PD_QUERY_TYPE {
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
+};
+
+enum MR_LD_QUERY_TYPE {
+ MR_LD_QUERY_TYPE_ALL = 0,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+ MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
+ MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
+ MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
+};
+
+
+#define MR_EVT_CFG_CLEARED 0x0004
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+
+enum MR_PD_STATE {
+ MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
+ MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
+ MR_PD_STATE_HOT_SPARE = 0x02,
+ MR_PD_STATE_OFFLINE = 0x10,
+ MR_PD_STATE_FAILED = 0x11,
+ MR_PD_STATE_REBUILD = 0x14,
+ MR_PD_STATE_ONLINE = 0x18,
+ MR_PD_STATE_COPYBACK = 0x20,
+ MR_PD_STATE_SYSTEM = 0x40,
+ };
+
+
+ /*
+ * defines the physical drive address structure
+ */
+struct MR_PD_ADDRESS {
+ u16 deviceId;
+ u16 enclDeviceId;
+
+ union {
+ struct {
+ u8 enclIndex;
+ u8 slotNumber;
+ } mrPdAddress;
+ struct {
+ u8 enclPosition;
+ u8 enclConnectorIndex;
+ } mrEnclAddress;
+ };
+ u8 scsiDevType;
+ union {
+ u8 connectedPortBitmap;
+ u8 connectedPortNumbers;
+ };
+ u64 sasAddr[2];
+} __packed;
+
+/*
+ * defines the physical drive list structure
+ */
+struct MR_PD_LIST {
+ u32 size;
+ u32 count;
+ struct MR_PD_ADDRESS addr[1];
+} __packed;
+
+struct megasas_pd_list {
+ u16 tid;
+ u8 driveType;
+ u8 driveState;
+} __packed;
+
+ /*
+ * defines the logical drive reference structure
+ */
+union MR_LD_REF {
+ struct {
+ u8 targetId;
+ u8 reserved;
+ u16 seqNum;
+ };
+ u32 ref;
+} __packed;
+
+/*
+ * defines the logical drive list structure
+ */
+struct MR_LD_LIST {
+ u32 ldCount;
+ u32 reserved;
+ struct {
+ union MR_LD_REF ref;
+ u8 state;
+ u8 reserved[3];
+ u64 size;
+ } ldList[MAX_LOGICAL_DRIVES_EXT];
+} __packed;
+
+struct MR_LD_TARGETID_LIST {
+ u32 size;
+ u32 count;
+ u8 pad[3];
+ u8 targetId[MAX_LOGICAL_DRIVES_EXT];
+};
+
+
+/*
+ * SAS controller properties
+ */
+struct megasas_ctrl_prop {
+
+ u16 seq_num;
+ u16 pred_fail_poll_interval;
+ u16 intr_throttle_count;
+ u16 intr_throttle_timeouts;
+ u8 rebuild_rate;
+ u8 patrol_read_rate;
+ u8 bgi_rate;
+ u8 cc_rate;
+ u8 recon_rate;
+ u8 cache_flush_interval;
+ u8 spinup_drv_count;
+ u8 spinup_delay;
+ u8 cluster_enable;
+ u8 coercion_mode;
+ u8 alarm_enable;
+ u8 disable_auto_rebuild;
+ u8 disable_battery_warn;
+ u8 ecc_bucket_size;
+ u16 ecc_bucket_leak_rate;
+ u8 restore_hotspare_on_insertion;
+ u8 expose_encl_devices;
+ u8 maintainPdFailHistory;
+ u8 disallowHostRequestReordering;
+ u8 abortCCOnError;
+ u8 loadBalanceMode;
+ u8 disableAutoDetectBackplane;
+
+ u8 snapVDSpace;
+
+ /*
+ * Add properties that can be controlled by
+ * a bit in the following structure.
+ */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:18;
+ u32 enableJBOD:1;
+ u32 disableSpinDownHS:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 enableSecretKeyControl:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 disableNCQ:1;
+ u32 useFdeOnly:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 SMARTerEnabled:1;
+ u32 copyBackDisabled:1;
+#else
+ u32 copyBackDisabled:1;
+ u32 SMARTerEnabled:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 useFdeOnly:1;
+ u32 disableNCQ:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSecretKeyControl:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableSpinDownHS:1;
+ u32 enableJBOD:1;
+ u32 reserved:18;
+#endif
+ } OnOffProperties;
+ u8 autoSnapVDSpace;
+ u8 viewSpace;
+ u16 spinDownTime;
+ u8 reserved[24];
+} __packed;
+
+/*
+ * SAS controller information
+ */
+struct megasas_ctrl_info {
+
+ /*
+ * PCI device information
+ */
+ struct {
+
+ u16 vendor_id;
+ u16 device_id;
+ u16 sub_vendor_id;
+ u16 sub_device_id;
+ u8 reserved[24];
+
+ } __attribute__ ((packed)) pci;
+
+ /*
+ * Host interface information
+ */
+ struct {
+
+ u8 PCIX:1;
+ u8 PCIE:1;
+ u8 iSCSI:1;
+ u8 SAS_3G:1;
+ u8 SRIOV:1;
+ u8 reserved_0:3;
+ u8 reserved_1[6];
+ u8 port_count;
+ u64 port_addr[8];
+
+ } __attribute__ ((packed)) host_interface;
+
+ /*
+ * Device (backend) interface information
+ */
+ struct {
+
+ u8 SPI:1;
+ u8 SAS_3G:1;
+ u8 SATA_1_5G:1;
+ u8 SATA_3G:1;
+ u8 reserved_0:4;
+ u8 reserved_1[6];
+ u8 port_count;
+ u64 port_addr[8];
+
+ } __attribute__ ((packed)) device_interface;
+
+ /*
+	 * List of components residing in flash. All strings are null terminated
+ */
+ u32 image_check_word;
+ u32 image_component_count;
+
+ struct {
+
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char built_time[16];
+
+ } __attribute__ ((packed)) image_component[8];
+
+ /*
+ * List of flash components that have been flashed on the card, but
+ * are not in use, pending reset of the adapter. This list will be
+	 * empty if a flash operation has not occurred. All strings are null
+ * terminated
+ */
+ u32 pending_image_component_count;
+
+ struct {
+
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+
+ } __attribute__ ((packed)) pending_image_component[8];
+
+ u8 max_arms;
+ u8 max_spans;
+ u8 max_arrays;
+ u8 max_lds;
+
+ char product_name[80];
+ char serial_no[32];
+
+ /*
+ * Other physical/controller/operation information. Indicates the
+ * presence of the hardware
+ */
+ struct {
+
+ u32 bbu:1;
+ u32 alarm:1;
+ u32 nvram:1;
+ u32 uart:1;
+ u32 reserved:28;
+
+ } __attribute__ ((packed)) hw_present;
+
+ u32 current_fw_time;
+
+ /*
+ * Maximum data transfer sizes
+ */
+ u16 max_concurrent_cmds;
+ u16 max_sge_count;
+ u32 max_request_size;
+
+ /*
+ * Logical and physical device counts
+ */
+ u16 ld_present_count;
+ u16 ld_degraded_count;
+ u16 ld_offline_count;
+
+ u16 pd_present_count;
+ u16 pd_disk_present_count;
+ u16 pd_disk_pred_failure_count;
+ u16 pd_disk_failed_count;
+
+ /*
+ * Memory size information
+ */
+ u16 nvram_size;
+ u16 memory_size;
+ u16 flash_size;
+
+ /*
+ * Error counters
+ */
+ u16 mem_correctable_error_count;
+ u16 mem_uncorrectable_error_count;
+
+ /*
+ * Cluster information
+ */
+ u8 cluster_permitted;
+ u8 cluster_active;
+
+ /*
+ * Additional max data transfer sizes
+ */
+ u16 max_strips_per_io;
+
+ /*
+ * Controller capabilities structures
+ */
+ struct {
+
+ u32 raid_level_0:1;
+ u32 raid_level_1:1;
+ u32 raid_level_5:1;
+ u32 raid_level_1E:1;
+ u32 raid_level_6:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) raid_levels;
+
+ struct {
+
+ u32 rbld_rate:1;
+ u32 cc_rate:1;
+ u32 bgi_rate:1;
+ u32 recon_rate:1;
+ u32 patrol_rate:1;
+ u32 alarm_control:1;
+ u32 cluster_supported:1;
+ u32 bbu:1;
+ u32 spanning_allowed:1;
+ u32 dedicated_hotspares:1;
+ u32 revertible_hotspares:1;
+ u32 foreign_config_import:1;
+ u32 self_diagnostic:1;
+ u32 mixed_redundancy_arr:1;
+ u32 global_hot_spares:1;
+ u32 reserved:17;
+
+ } __attribute__ ((packed)) adapter_operations;
+
+ struct {
+
+ u32 read_policy:1;
+ u32 write_policy:1;
+ u32 io_policy:1;
+ u32 access_policy:1;
+ u32 disk_cache_policy:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) ld_operations;
+
+ struct {
+
+ u8 min;
+ u8 max;
+ u8 reserved[2];
+
+ } __attribute__ ((packed)) stripe_sz_ops;
+
+ struct {
+
+ u32 force_online:1;
+ u32 force_offline:1;
+ u32 force_rebuild:1;
+ u32 reserved:29;
+
+ } __attribute__ ((packed)) pd_operations;
+
+ struct {
+
+ u32 ctrl_supports_sas:1;
+ u32 ctrl_supports_sata:1;
+ u32 allow_mix_in_encl:1;
+ u32 allow_mix_in_ld:1;
+ u32 allow_sata_in_cluster:1;
+ u32 reserved:27;
+
+ } __attribute__ ((packed)) pd_mix_support;
+
+ /*
+ * Define ECC single-bit-error bucket information
+ */
+ u8 ecc_bucket_count;
+ u8 reserved_2[11];
+
+ /*
+ * Include the controller properties (changeable items)
+ */
+ struct megasas_ctrl_prop properties;
+
+ /*
+	 * Define FW pkg version (set in environment variables on an OEM basis)
+ */
+ char package_version[0x60];
+
+
+ /*
+ * If adapterOperations.supportMoreThan8Phys is set,
+ * and deviceInterface.portCount is greater than 8,
+ * SAS Addrs for first 8 ports shall be populated in
+ * deviceInterface.portAddr, and the rest shall be
+ * populated in deviceInterfacePortAddr2.
+ */
+ u64 deviceInterfacePortAddr2[8]; /*6a0h */
+ u8 reserved3[128]; /*6e0h */
+
+ struct { /*760h */
+ u16 minPdRaidLevel_0:4;
+ u16 maxPdRaidLevel_0:12;
+
+ u16 minPdRaidLevel_1:4;
+ u16 maxPdRaidLevel_1:12;
+
+ u16 minPdRaidLevel_5:4;
+ u16 maxPdRaidLevel_5:12;
+
+ u16 minPdRaidLevel_1E:4;
+ u16 maxPdRaidLevel_1E:12;
+
+ u16 minPdRaidLevel_6:4;
+ u16 maxPdRaidLevel_6:12;
+
+ u16 minPdRaidLevel_10:4;
+ u16 maxPdRaidLevel_10:12;
+
+ u16 minPdRaidLevel_50:4;
+ u16 maxPdRaidLevel_50:12;
+
+ u16 minPdRaidLevel_60:4;
+ u16 maxPdRaidLevel_60:12;
+
+ u16 minPdRaidLevel_1E_RLQ0:4;
+ u16 maxPdRaidLevel_1E_RLQ0:12;
+
+ u16 minPdRaidLevel_1E0_RLQ0:4;
+ u16 maxPdRaidLevel_1E0_RLQ0:12;
+
+ u16 reserved[6];
+ } pdsForRaidLevels;
+
+ u16 maxPds; /*780h */
+ u16 maxDedHSPs; /*782h */
+ u16 maxGlobalHSPs; /*784h */
+ u16 ddfSize; /*786h */
+ u8 maxLdsPerArray; /*788h */
+ u8 partitionsInDDF; /*789h */
+ u8 lockKeyBinding; /*78ah */
+ u8 maxPITsPerLd; /*78bh */
+ u8 maxViewsPerLd; /*78ch */
+ u8 maxTargetId; /*78dh */
+ u16 maxBvlVdSize; /*78eh */
+
+ u16 maxConfigurableSSCSize; /*790h */
+ u16 currentSSCsize; /*792h */
+
+ char expanderFwVersion[12]; /*794h */
+
+ u16 PFKTrialTimeRemaining; /*7A0h */
+
+ u16 cacheMemorySize; /*7A2h */
+
+ struct { /*7A4h */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:5;
+ u32 activePassive:2;
+ u32 supportConfigAutoBalance:1;
+ u32 mpio:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 supportPointInTimeProgress:1;
+ u32 supportUnevenSpans:1;
+ u32 dedicatedHotSparesLimited:1;
+ u32 headlessMode:1;
+ u32 supportEmulatedDrives:1;
+ u32 supportResetNow:1;
+ u32 realTimeScheduler:1;
+ u32 supportSSDPatrolRead:1;
+ u32 supportPerfTuning:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportJBOD:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportShieldState:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType1:1;
+ u32 supportPIcontroller:1;
+#else
+ u32 supportPIcontroller:1;
+ u32 supportLdPIType1:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportShieldState:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportJBOD:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportPerfTuning:1;
+ u32 supportSSDPatrolRead:1;
+ u32 realTimeScheduler:1;
+
+ u32 supportResetNow:1;
+ u32 supportEmulatedDrives:1;
+ u32 headlessMode:1;
+ u32 dedicatedHotSparesLimited:1;
+
+
+ u32 supportUnevenSpans:1;
+ u32 supportPointInTimeProgress:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 mpio:1;
+ u32 supportConfigAutoBalance:1;
+ u32 activePassive:2;
+ u32 reserved:5;
+#endif
+ } adapterOperations2;
+
+ u8 driverVersion[32]; /*7A8h */
+ u8 maxDAPdCountSpinup60; /*7C8h */
+ u8 temperatureROC; /*7C9h */
+ u8 temperatureCtrl; /*7CAh */
+ u8 reserved4; /*7CBh */
+ u16 maxConfigurablePds; /*7CCh */
+
+
+ u8 reserved5[2]; /*0x7CDh */
+
+ /*
+ * HA cluster information
+ */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:26;
+ u32 premiumFeatureMismatch:1;
+ u32 ctrlPropIncompatible:1;
+ u32 fwVersionMismatch:1;
+ u32 hwIncompatible:1;
+ u32 peerIsIncompatible:1;
+ u32 peerIsPresent:1;
+#else
+ u32 peerIsPresent:1;
+ u32 peerIsIncompatible:1;
+ u32 hwIncompatible:1;
+ u32 fwVersionMismatch:1;
+ u32 ctrlPropIncompatible:1;
+ u32 premiumFeatureMismatch:1;
+ u32 reserved:26;
+#endif
+ } cluster;
+
+ char clusterId[16]; /*7D4h */
+ struct {
+ u8 maxVFsSupported; /*0x7E4*/
+ u8 numVFsEnabled; /*0x7E5*/
+ u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+ u8 reserved; /*0x7E7*/
+ } iov;
+
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:12;
+ u32 discardCacheDuringLDDelete:1;
+ u32 supportSecurityonJBOD:1;
+ u32 supportCacheBypassModes:1;
+ u32 supportDisableSESMonitoring:1;
+ u32 supportForceFlash:1;
+ u32 supportNVDRAM:1;
+ u32 supportDrvActivityLEDSetting:1;
+ u32 supportAllowedOpsforDrvRemoval:1;
+ u32 supportHOQRebuild:1;
+ u32 supportForceTo512e:1;
+ u32 supportNVCacheErase:1;
+ u32 supportDebugQueue:1;
+ u32 supportSwZone:1;
+ u32 supportCrashDump:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportThermalPollInterval:1;
+ u32 supportPersonalityChange:2;
+#else
+ u32 supportPersonalityChange:2;
+ u32 supportThermalPollInterval:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportCrashDump:1;
+ u32 supportSwZone:1;
+ u32 supportDebugQueue:1;
+ u32 supportNVCacheErase:1;
+ u32 supportForceTo512e:1;
+ u32 supportHOQRebuild:1;
+ u32 supportAllowedOpsforDrvRemoval:1;
+ u32 supportDrvActivityLEDSetting:1;
+ u32 supportNVDRAM:1;
+ u32 supportForceFlash:1;
+ u32 supportDisableSESMonitoring:1;
+ u32 supportCacheBypassModes:1;
+ u32 supportSecurityonJBOD:1;
+ u32 discardCacheDuringLDDelete:1;
+ u32 reserved:12;
+#endif
+ } adapterOperations3;
+
+ u8 pad[0x800-0x7EC];
+} __packed;
+
+/*
+ * ===============================
+ * MegaRAID SAS driver definitions
+ * ===============================
+ */
+#define MEGASAS_MAX_PD_CHANNELS 2
+#define MEGASAS_MAX_LD_CHANNELS 2
+#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
+ MEGASAS_MAX_LD_CHANNELS)
+#define MEGASAS_MAX_DEV_PER_CHANNEL 128
+#define MEGASAS_DEFAULT_INIT_ID -1
+#define MEGASAS_MAX_LUN 8
+#define MEGASAS_DEFAULT_CMD_PER_LUN 256
+#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
+ MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
+ MEGASAS_MAX_DEV_PER_CHANNEL)
+
+#define MEGASAS_MAX_SECTORS (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE (2*128)
+#define MEGASAS_DBG_LVL 1
+
+#define MEGASAS_FW_BUSY 1
+
+#define VD_EXT_DEBUG 0
+
+enum MR_MFI_MPT_PTHR_FLAGS {
+ MFI_MPT_DETACHED = 0,
+ MFI_LIST_ADDED = 1,
+ MFI_MPT_ATTACHED = 2,
+};
+
+enum MR_SCSI_CMD_TYPE {
+ READ_WRITE_LDIO = 0,
+ NON_READ_WRITE_LDIO = 1,
+ READ_WRITE_SYSPDIO = 2,
+ NON_READ_WRITE_SYSPDIO = 3,
+};
+
+/* Frame Type */
+#define IO_FRAME 0
+#define PTHRU_FRAME 1
+
+/*
+ * When SCSI mid-layer calls driver's reset routine, driver waits for
+ * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
+ * that the driver cannot _actually_ abort or reset pending commands. While
+ * it is waiting for the commands to complete, it prints a diagnostic message
+ * every MEGASAS_RESET_NOTICE_INTERVAL seconds
+ */
+#define MEGASAS_RESET_WAIT_TIME 180
+#define MEGASAS_INTERNAL_CMD_WAIT_TIME 180
+#define MEGASAS_RESET_NOTICE_INTERVAL 5
+#define MEGASAS_IOCTL_CMD 0
+#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
+#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
+#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
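The waiting behaviour described above can be sketched as a simple poll loop (illustrative; the function name is hypothetical and the real reset handler carries much more state handling):

	/* Sketch: wait for outstanding I/O to drain, with periodic notices. */
	static void wait_for_drain(struct megasas_instance *instance)
	{
		int i;

		for (i = 0; i < MEGASAS_RESET_WAIT_TIME; i++) {
			if (!atomic_read(&instance->fw_outstanding))
				break;			/* all I/O completed */

			if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
				printk(KERN_NOTICE "megasas: %d cmds pending\n",
				       atomic_read(&instance->fw_outstanding));

			msleep(1000);			/* poll once per second */
		}
	}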
+/*
+ * FW reports the maximum number of commands that it can accept (maximum
+ * commands that can be outstanding) at any time. The driver must report a
+ * lower number to the mid layer because it issues a few internal commands
+ * itself (e.g., AEN, abort cmd, ioctls, etc.). The number of commands it
+ * needs is shown below.
+ */
+#define MEGASAS_INT_CMDS 32
+#define MEGASAS_SKINNY_INT_CMDS 5
+#define MEGASAS_FUSION_INTERNAL_CMDS 5
+#define MEGASAS_FUSION_IOCTL_CMDS 3
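A minimal sketch of the resulting budget arithmetic, using the max_fw_cmds/max_scsi_cmds fields of struct megasas_instance (the exact reservation differs per controller family; skinny and fusion controllers use the other constants above):

	/* Sketch: report fewer commands to the mid layer than FW accepts. */
	instance->max_scsi_cmds = instance->max_fw_cmds - MEGASAS_INT_CMDS;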
+
+#define MEGASAS_MAX_MSIX_QUEUES 128
+/*
+ * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
+ * SGLs based on the size of dma_addr_t
+ */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
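For example, a frame's SGL flavour can be selected from the macro above together with the MFI frame flags defined earlier (a sketch, not a quote from the driver):

	/* Sketch: use 64-bit SGL descriptors only when dma_addr_t is 64-bit. */
	u16 sgl_flags = IS_DMA64 ? MFI_FRAME_SGL64 : MFI_FRAME_SGL32;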
+
+#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001
+
+#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
+#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
+#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004
+
+#define MFI_OB_INTR_STATUS_MASK 0x00000002
+#define MFI_POLL_TIMEOUT_SECS 60
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
+#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
+
+#define MFI_1068_PCSR_OFFSET 0x84
+#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
+#define MFI_1068_FW_READY 0xDDDD0000
+
+#define MR_MAX_REPLY_QUEUES_OFFSET 0X0000001F
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
+#define MR_MAX_MSIX_REG_ARRAY 16
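A sketch of how these masks decode the reply-queue count from a raw scratch-pad value (the helper name is hypothetical; older controllers use the low 5 bits, newer ones the extended field):

	/* Sketch: extract the FW-advertised reply queue count. */
	static u32 decode_msix_count(u32 scratch_pad)
	{
		return (scratch_pad & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
				>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT;
	}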
+/*
+ * Register set for both 1068 and 1078 controllers;
+ * structure extended for 1078 registers.
+ */
+
+struct megasas_register_set {
+ u32 doorbell; /*0000h*/
+ u32 fusion_seq_offset; /*0004h*/
+ u32 fusion_host_diag; /*0008h*/
+ u32 reserved_01; /*000Ch*/
+
+ u32 inbound_msg_0; /*0010h*/
+ u32 inbound_msg_1; /*0014h*/
+ u32 outbound_msg_0; /*0018h*/
+ u32 outbound_msg_1; /*001Ch*/
+
+ u32 inbound_doorbell; /*0020h*/
+ u32 inbound_intr_status; /*0024h*/
+ u32 inbound_intr_mask; /*0028h*/
+
+ u32 outbound_doorbell; /*002Ch*/
+ u32 outbound_intr_status; /*0030h*/
+ u32 outbound_intr_mask; /*0034h*/
+
+ u32 reserved_1[2]; /*0038h*/
+
+ u32 inbound_queue_port; /*0040h*/
+ u32 outbound_queue_port; /*0044h*/
+
+ u32 reserved_2[9]; /*0048h*/
+ u32 reply_post_host_index; /*006Ch*/
+ u32 reserved_2_2[12]; /*0070h*/
+
+ u32 outbound_doorbell_clear; /*00A0h*/
+
+ u32 reserved_3[3]; /*00A4h*/
+
+ u32 outbound_scratch_pad ; /*00B0h*/
+ u32 outbound_scratch_pad_2; /*00B4h*/
+
+ u32 reserved_4[2]; /*00B8h*/
+
+ u32 inbound_low_queue_port ; /*00C0h*/
+
+ u32 inbound_high_queue_port ; /*00C4h*/
+
+ u32 reserved_5; /*00C8h*/
+ u32 res_6[11]; /*CCh*/
+ u32 host_diag;
+ u32 seq_offset;
+ u32 index_registers[807]; /*00CCh*/
+} __attribute__ ((packed));
+
+struct megasas_sge32 {
+
+ u32 phys_addr;
+ u32 length;
+
+} __attribute__ ((packed));
+
+struct megasas_sge64 {
+
+ u64 phys_addr;
+ u32 length;
+
+} __attribute__ ((packed));
+
+struct megasas_sge_skinny {
+ u64 phys_addr;
+ u32 length;
+ u32 flag;
+} __packed;
+
+union megasas_sgl {
+
+ struct megasas_sge32 sge32[1];
+ struct megasas_sge64 sge64[1];
+ struct megasas_sge_skinny sge_skinny[1];
+
+} __attribute__ ((packed));
+
+struct megasas_header {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 lun; /*05h */
+ u8 cdb_len; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 data_xferlen; /*14h */
+
+} __attribute__ ((packed));
+
+union megasas_sgl_frame {
+
+ struct megasas_sge32 sge32[8];
+ struct megasas_sge64 sge64[5];
+
+} __attribute__ ((packed));
+
+typedef union _MFI_CAPABILITIES {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:25;
+ u32 security_protocol_cmds_fw:1;
+ u32 support_core_affinity:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_max_255lds:1;
+ u32 support_fastpath_wb:1;
+ u32 support_additional_msix:1;
+ u32 support_fp_remote_lun:1;
+#else
+ u32 support_fp_remote_lun:1;
+ u32 support_additional_msix:1;
+ u32 support_fastpath_wb:1;
+ u32 support_max_255lds:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_core_affinity:1;
+ u32 security_protocol_cmds_fw:1;
+ u32 reserved:25;
+#endif
+ } mfi_capabilities;
+ u32 reg;
+} MFI_CAPABILITIES;
+
+struct megasas_init_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+
+ u8 reserved_1; /*03h */
+ MFI_CAPABILITIES driver_operations; /*04h*/
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 reserved_3; /*12h */
+ u32 data_xfer_len; /*14h */
+
+ u32 queue_info_new_phys_addr_lo; /*18h */
+ u32 queue_info_new_phys_addr_hi; /*1Ch */
+ u32 queue_info_old_phys_addr_lo; /*20h */
+ u32 queue_info_old_phys_addr_hi; /*24h */
+
+ u32 reserved_4[6]; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_init_queue_info {
+
+ u32 init_flags; /*00h */
+ u32 reply_queue_entries; /*04h */
+
+ u32 reply_queue_start_phys_addr_lo; /*08h */
+ u32 reply_queue_start_phys_addr_hi; /*0Ch */
+ u32 producer_index_phys_addr_lo; /*10h */
+ u32 producer_index_phys_addr_hi; /*14h */
+ u32 consumer_index_phys_addr_lo; /*18h */
+ u32 consumer_index_phys_addr_hi; /*1Ch */
+
+} __attribute__ ((packed));
+
+struct megasas_io_frame {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 access_byte; /*05h */
+ u8 reserved_0; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 lba_count; /*14h */
+
+ u32 sense_buf_phys_addr_lo; /*18h */
+ u32 sense_buf_phys_addr_hi; /*1Ch */
+
+ u32 start_lba_lo; /*20h */
+ u32 start_lba_hi; /*24h */
+
+ union megasas_sgl sgl; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_pthru_frame {
+
+ u8 cmd; /*00h */
+ u8 sense_len; /*01h */
+ u8 cmd_status; /*02h */
+ u8 scsi_status; /*03h */
+
+ u8 target_id; /*04h */
+ u8 lun; /*05h */
+ u8 cdb_len; /*06h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+ u32 data_xfer_len; /*14h */
+
+ u32 sense_buf_phys_addr_lo; /*18h */
+ u32 sense_buf_phys_addr_hi; /*1Ch */
+
+ u8 cdb[16]; /*20h */
+ union megasas_sgl sgl; /*30h */
+
+} __attribute__ ((packed));
+
+struct megasas_dcmd_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+ u8 reserved_1[4]; /*03h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+ u32 opcode; /*18h */
+
+ union { /*1Ch */
+ u8 b[12];
+ u16 s[6];
+ u32 w[3];
+ } mbox;
+
+ union megasas_sgl sgl; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_abort_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_0; /*01h */
+ u8 cmd_status; /*02h */
+
+ u8 reserved_1; /*03h */
+ u32 reserved_2; /*04h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 reserved_3; /*12h */
+ u32 reserved_4; /*14h */
+
+ u32 abort_context; /*18h */
+ u32 pad_1; /*1Ch */
+
+ u32 abort_mfi_phys_addr_lo; /*20h */
+ u32 abort_mfi_phys_addr_hi; /*24h */
+
+ u32 reserved_5[6]; /*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_smp_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_1; /*01h */
+ u8 cmd_status; /*02h */
+ u8 connection_status; /*03h */
+
+ u8 reserved_2[3]; /*04h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+ u64 sas_addr; /*18h */
+
+ union {
+ struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
+ struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */
+ } sgl;
+
+} __attribute__ ((packed));
+
+struct megasas_stp_frame {
+
+ u8 cmd; /*00h */
+ u8 reserved_1; /*01h */
+ u8 cmd_status; /*02h */
+ u8 reserved_2; /*03h */
+
+ u8 target_id; /*04h */
+ u8 reserved_3[2]; /*05h */
+ u8 sge_count; /*07h */
+
+ u32 context; /*08h */
+ u32 pad_0; /*0Ch */
+
+ u16 flags; /*10h */
+ u16 timeout; /*12h */
+
+ u32 data_xfer_len; /*14h */
+
+ u16 fis[10]; /*18h */
+ u32 stp_flags;
+
+ union {
+ struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
+ struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */
+ } sgl;
+
+} __attribute__ ((packed));
+
+union megasas_frame {
+
+ struct megasas_header hdr;
+ struct megasas_init_frame init;
+ struct megasas_io_frame io;
+ struct megasas_pthru_frame pthru;
+ struct megasas_dcmd_frame dcmd;
+ struct megasas_abort_frame abort;
+ struct megasas_smp_frame smp;
+ struct megasas_stp_frame stp;
+
+ u8 raw_bytes[64];
+};
+
+struct megasas_cmd;
+
+union megasas_evt_class_locale {
+
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u16 locale;
+ u8 reserved;
+ s8 class;
+#else
+ s8 class;
+ u8 reserved;
+ u16 locale;
+#endif
+ } __attribute__ ((packed)) members;
+
+ u32 word;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_log_info {
+ u32 newest_seq_num;
+ u32 oldest_seq_num;
+ u32 clear_seq_num;
+ u32 shutdown_seq_num;
+ u32 boot_seq_num;
+
+} __attribute__ ((packed));
+
+struct megasas_progress {
+
+ u16 progress;
+ u16 elapsed_seconds;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_ld {
+
+ u16 target_id;
+ u8 ld_index;
+ u8 reserved;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_pd {
+ u16 device_id;
+ u8 encl_index;
+ u8 slot_number;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_detail {
+
+ u32 seq_num;
+ u32 time_stamp;
+ u32 code;
+ union megasas_evt_class_locale cl;
+ u8 arg_type;
+ u8 reserved1[15];
+
+ union {
+ struct {
+ struct megasas_evtarg_pd pd;
+ u8 cdb_length;
+ u8 sense_length;
+ u8 reserved[2];
+ u8 cdb[16];
+ u8 sense[64];
+ } __attribute__ ((packed)) cdbSense;
+
+ struct megasas_evtarg_ld ld;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u64 count;
+ } __attribute__ ((packed)) ld_count;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) ld_lba;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u32 prevOwner;
+ u32 newOwner;
+ } __attribute__ ((packed)) ld_owner;
+
+ struct {
+ u64 ld_lba;
+ u64 pd_lba;
+ struct megasas_evtarg_ld ld;
+ struct megasas_evtarg_pd pd;
+ } __attribute__ ((packed)) ld_lba_pd_lba;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ struct megasas_progress prog;
+ } __attribute__ ((packed)) ld_prog;
+
+ struct {
+ struct megasas_evtarg_ld ld;
+ u32 prev_state;
+ u32 new_state;
+ } __attribute__ ((packed)) ld_state;
+
+ struct {
+ u64 strip;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) ld_strip;
+
+ struct megasas_evtarg_pd pd;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ u32 err;
+ } __attribute__ ((packed)) pd_err;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_pd pd;
+ } __attribute__ ((packed)) pd_lba;
+
+ struct {
+ u64 lba;
+ struct megasas_evtarg_pd pd;
+ struct megasas_evtarg_ld ld;
+ } __attribute__ ((packed)) pd_lba_ld;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ struct megasas_progress prog;
+ } __attribute__ ((packed)) pd_prog;
+
+ struct {
+ struct megasas_evtarg_pd pd;
+ u32 prevState;
+ u32 newState;
+ } __attribute__ ((packed)) pd_state;
+
+ struct {
+ u16 vendorId;
+ u16 deviceId;
+ u16 subVendorId;
+ u16 subDeviceId;
+ } __attribute__ ((packed)) pci;
+
+ u32 rate;
+ char str[96];
+
+ struct {
+ u32 rtc;
+ u32 elapsedSeconds;
+ } __attribute__ ((packed)) time;
+
+ struct {
+ u32 ecar;
+ u32 elog;
+ char str[64];
+ } __attribute__ ((packed)) ecc;
+
+ u8 b[96];
+ u16 s[48];
+ u32 w[24];
+ u64 d[12];
+ } args;
+
+ char description[128];
+
+} __attribute__ ((packed));
+
+struct megasas_aen_event {
+ struct delayed_work hotplug_work;
+ struct megasas_instance *instance;
+};
+
+struct megasas_irq_context {
+ struct megasas_instance *instance;
+ u32 MSIxIndex;
+};
+
+struct megasas_instance {
+
+ u32 *producer;
+ dma_addr_t producer_h;
+ u32 *consumer;
+ dma_addr_t consumer_h;
+ struct MR_LD_VF_AFFILIATION *vf_affiliation;
+ dma_addr_t vf_affiliation_h;
+ struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+ dma_addr_t vf_affiliation_111_h;
+ struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+ dma_addr_t hb_host_mem_h;
+
+ u32 *reply_queue;
+ dma_addr_t reply_queue_h;
+
+ u32 *crash_dump_buf;
+ dma_addr_t crash_dump_h;
+ void *crash_buf[MAX_CRASH_DUMP_SIZE];
+ u32 crash_buf_pages;
+ unsigned int fw_crash_buffer_size;
+ unsigned int fw_crash_state;
+ unsigned int fw_crash_buffer_offset;
+ u32 drv_buf_index;
+ u32 drv_buf_alloc;
+ u32 crash_dump_fw_support;
+ u32 crash_dump_drv_support;
+ u32 crash_dump_app_support;
+ u32 secure_jbod_support;
+ spinlock_t crashdump_lock;
+
+ struct megasas_register_set __iomem *reg_set;
+ u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+ struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
+ struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
+ u8 ld_ids[MEGASAS_MAX_LD_IDS];
+ s8 init_id;
+
+ u16 max_num_sge;
+ u16 max_fw_cmds;
+ u16 max_mfi_cmds;
+ u16 max_scsi_cmds;
+ u32 max_sectors_per_req;
+ struct megasas_aen_event *ev;
+
+ struct megasas_cmd **cmd_list;
+ struct list_head cmd_pool;
+	/* used to synchronize firing the cmd to fw */
+	spinlock_t mfi_pool_lock;
+	/* used to synchronize firing the cmd to fw */
+	spinlock_t hba_lock;
+	/* used to synchronize producer/consumer ptrs in dpc */
+	spinlock_t completion_lock;
+ struct dma_pool *frame_dma_pool;
+ struct dma_pool *sense_dma_pool;
+
+ struct megasas_evt_detail *evt_detail;
+ dma_addr_t evt_detail_h;
+ struct megasas_cmd *aen_cmd;
+ struct mutex aen_mutex;
+ struct semaphore ioctl_sem;
+
+ struct Scsi_Host *host;
+
+ wait_queue_head_t int_cmd_wait_q;
+ wait_queue_head_t abort_cmd_wait_q;
+
+ struct pci_dev *pdev;
+ u32 unique_id;
+ u32 fw_support_ieee;
+
+ atomic_t fw_outstanding;
+ atomic_t fw_reset_no_pci_access;
+
+ struct megasas_instance_template *instancet;
+ struct tasklet_struct isr_tasklet;
+ struct work_struct work_init;
+ struct work_struct crash_init;
+
+ u8 flag;
+ u8 unload;
+ u8 flag_ieee;
+ u8 issuepend_done;
+ u8 disableOnlineCtrlReset;
+ u8 UnevenSpanSupport;
+
+ u8 supportmax256vd;
+ u16 fw_supported_vd_count;
+ u16 fw_supported_pd_count;
+
+ u16 drv_supported_vd_count;
+ u16 drv_supported_pd_count;
+
+ u8 adprecovery;
+ unsigned long last_time;
+ u32 mfiStatus;
+ u32 last_seq_num;
+
+ struct list_head internal_reset_pending_q;
+
+ /* Ptr to hba specific information */
+ void *ctrl_context;
+ u32 ctrl_context_pages;
+ struct megasas_ctrl_info *ctrl_info;
+ unsigned int msix_vectors;
+ struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
+ struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
+ u64 map_id;
+ struct megasas_cmd *map_update_cmd;
+ unsigned long bar;
+ long reset_flags;
+ struct mutex reset_mutex;
+ struct timer_list sriov_heartbeat_timer;
+ char skip_heartbeat_timer_del;
+ u8 requestorId;
+ char PlasmaFW111;
+ char mpio;
+ u16 throttlequeuedepth;
+ u8 mask_interrupts;
+ u8 is_imr;
+};
+struct MR_LD_VF_MAP {
+ u32 size;
+ union MR_LD_REF ref;
+ u8 ldVfCount;
+ u8 reserved[6];
+ u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+ u32 size;
+ u8 ldCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[9];
+ struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+#define MR_LD_ACCESS_HIDDEN 15
+
+struct IOV_111 {
+ u8 maxVFsSupported;
+ u8 numVFsEnabled;
+ u8 requestorId;
+ u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+ u8 targetId;
+ u8 reserved[3];
+ u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+ u8 vdCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[5];
+ struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+ struct {
+ u32 fwCounter; /* Firmware heart beat counter */
+ struct {
+ u32 debugmode:1; /* 1=Firmware is in debug mode.
+ Heart beat will not be updated. */
+ u32 reserved:31;
+ } debug;
+ u32 reserved_fw[6];
+ u32 driverCounter; /* Driver heart beat counter. 0x20 */
+ u32 reserved_driver[7];
+ } HB;
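+	/* pad the shared block out to 0x400 bytes (HB occupies the first 0x40) */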
+ u8 pad[0x400-0x40];
+};
+
+enum {
+ MEGASAS_HBA_OPERATIONAL = 0,
+ MEGASAS_ADPRESET_SM_INFAULT = 1,
+ MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
+ MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
+ MEGASAS_HW_CRITICAL_ERROR = 4,
+ MEGASAS_ADPRESET_SM_POLLING = 5,
+ MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
+};
+
+struct megasas_instance_template {
+	void (*fire_cmd)(struct megasas_instance *, dma_addr_t,
+ u32, struct megasas_register_set __iomem *);
+
+ void (*enable_intr)(struct megasas_instance *);
+ void (*disable_intr)(struct megasas_instance *);
+
+ int (*clear_intr)(struct megasas_register_set __iomem *);
+
+ u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+	int (*adp_reset)(struct megasas_instance *,
+		struct megasas_register_set __iomem *);
+	int (*check_reset)(struct megasas_instance *,
+ struct megasas_register_set __iomem *);
+ irqreturn_t (*service_isr)(int irq, void *devp);
+ void (*tasklet)(unsigned long);
+ u32 (*init_adapter)(struct megasas_instance *);
+ u32 (*build_and_issue_cmd) (struct megasas_instance *,
+ struct scsi_cmnd *);
+ void (*issue_dcmd) (struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+};
+
+#define MEGASAS_IS_LOGICAL(scp) \
+ (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+
+#define MEGASAS_DEV_INDEX(inst, scp) \
+ ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+ scp->device->id
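+
+/*
+ * e.g. a device at channel 1, id 5 maps to index
+ * (1 % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + 5, while channels below
+ * MEGASAS_MAX_PD_CHANNELS are physical (MEGASAS_IS_LOGICAL == 0)
+ */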
+
+struct megasas_cmd {
+
+ union megasas_frame *frame;
+ dma_addr_t frame_phys_addr;
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
+ u32 index;
+ u8 sync_cmd;
+ u8 cmd_status;
+ u8 abort_aen;
+ u8 retry_for_fw_reset;
+
+
+ struct list_head list;
+ struct scsi_cmnd *scmd;
+
+ void *mpt_pthr_cmd_blocked;
+ atomic_t mfi_mpt_pthr;
+ u8 is_wait_event;
+
+ struct megasas_instance *instance;
+ union {
+ struct {
+ u16 smid;
+ u16 resvd;
+ } context;
+ u32 frame_count;
+ };
+};
+
+#define MAX_MGMT_ADAPTERS 1024
+#define MAX_IOCTL_SGE 16
+
+struct megasas_iocpacket {
+
+ u16 host_no;
+ u16 __pad1;
+ u32 sgl_off;
+ u32 sge_count;
+ u32 sense_off;
+ u32 sense_len;
+ union {
+ u8 raw[128];
+ struct megasas_header hdr;
+ } frame;
+
+ struct iovec sgl[MAX_IOCTL_SGE];
+
+} __attribute__ ((packed));
+
+struct megasas_aen {
+ u16 host_no;
+ u16 __pad1;
+ u32 seq_num;
+ u32 class_locale_word;
+} __attribute__ ((packed));
+
+#ifdef CONFIG_COMPAT
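+/*
+ * 32-bit user space passes 32-bit iovec pointers, so the ioctl packet
+ * needs a separate compat layout (same 'M'/1 ioctl number as below)
+ */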
+struct compat_megasas_iocpacket {
+ u16 host_no;
+ u16 __pad1;
+ u32 sgl_off;
+ u32 sge_count;
+ u32 sense_off;
+ u32 sense_len;
+ union {
+ u8 raw[128];
+ struct megasas_header hdr;
+ } frame;
+ struct compat_iovec sgl[MAX_IOCTL_SGE];
+} __attribute__ ((packed));
+
+#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
+#endif
+
+#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
+#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
+
+struct megasas_mgmt_info {
+
+ u16 count;
+ struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
+ int max_index;
+};
+
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo);
+int megasas_get_ctrl_info(struct megasas_instance *instance);
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state);
+void megasas_free_host_crash_buffer(struct megasas_instance *instance);
+void megasas_fusion_crash_dump_wq(struct work_struct *work);
+
+void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd);
+int megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, int timeout);
+void __megasas_return_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+
+void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
+int megasas_cmd_type(struct scsi_cmnd *cmd);
+
+#endif /* LSI_MEGARAID_SAS_H */
diff --git a/kernel/drivers/scsi/megaraid/megaraid_sas_base.c b/kernel/drivers/scsi/megaraid/megaraid_sas_base.c
new file mode 100644
index 000000000..890637fdd
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -0,0 +1,6892 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Authors: Avago Technologies
+ * Sreenivas Bagalkote
+ * Sumant Patro
+ * Bo Yang
+ * Adam Radford
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if the user does not provide one
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+ "Maximum number of sectors per IO command");
+
+static int msix_disable;
+module_param(msix_disable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
+
+static unsigned int msix_vectors;
+module_param(msix_vectors, int, S_IRUGO);
+MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
+
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
+static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+module_param(throttlequeuedepth, int, S_IRUGO);
+MODULE_PARM_DESC(throttlequeuedepth,
+ "Adapter queue depth when throttled due to I/O timeout. Default: 16");
+
+int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+module_param(resetwaittime, int, S_IRUGO);
+MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
+ "before resetting adapter. Default: 180");
+
+int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGASAS_VERSION);
+MODULE_AUTHOR("megaraidlinux@lsi.com");
+MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
+
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+ u8 query_type);
+static int megasas_issue_init_mfi(struct megasas_instance *instance);
+static int megasas_register_aen(struct megasas_instance *instance,
+ u32 seq_num, u32 class_locale_word);
+/*
+ * PCI ID table for all supported controllers
+ */
+static struct pci_device_id megasas_pci_table[] = {
+
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
+ /* xscale IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
+ /* ppc IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
+ /* ppc IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
+ /* gen2*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
+ /* gen2*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
+ /* skinny*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
+ /* skinny*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
+ /* xscale IOP, vega */
+ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
+ /* xscale IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
+ /* Fusion */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+ /* Plasma */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
+ /* Invader */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
+ /* Fury */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, megasas_pci_table);
+
+static int megasas_mgmt_majorno;
+struct megasas_mgmt_info megasas_mgmt_info;
+static struct fasync_struct *megasas_async_queue;
+static DEFINE_MUTEX(megasas_async_queue_mutex);
+
+static int megasas_poll_wait_aen;
+static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
+static u32 support_poll_for_event;
+u32 megasas_dbg_lvl;
+static u32 support_device_change;
+
+/* lock for AEN poll */
+spinlock_t poll_aen_lock;
+
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status);
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set);
+static irqreturn_t megasas_isr(int irq, void *devp);
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance);
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
+static void megasas_complete_cmd_dpc(unsigned long instance_addr);
+void
+megasas_release_fusion(struct megasas_instance *instance);
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance);
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance);
+u8
+megasas_get_map_info(struct megasas_instance *instance);
+int
+megasas_sync_map_info(struct megasas_instance *instance);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
+void megasas_reset_reply_desc(struct megasas_instance *instance);
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
+void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
+
+void
+megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr, 0, instance->reg_set);
+}
+
+/**
+ * megasas_get_cmd - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool, or NULL if the pool is empty
+ */
+struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance)
+{
+ unsigned long flags;
+ struct megasas_cmd *cmd = NULL;
+
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+
+ if (!list_empty(&instance->cmd_pool)) {
+ cmd = list_entry((&instance->cmd_pool)->next,
+ struct megasas_cmd, list);
+ list_del_init(&cmd->list);
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
+ } else {
+ printk(KERN_ERR "megasas: Command pool empty!\n");
+ }
+
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+ return cmd;
+}
+
+/**
+ * __megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void
+__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	/*
+	 * Don't free the MFI frame if the corresponding MPT frame has not
+	 * been freed (valid only for fusion adapters). On MFI adapters,
+	 * any allocated MFI frame will have cmd->mfi_mpt_pthr set to
+	 * MFI_MPT_DETACHED anyway.
+	 */
+ if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED)
+ return;
+
+ cmd->scmd = NULL;
+ cmd->frame_count = 0;
+ cmd->is_wait_event = 0;
+ cmd->mpt_pthr_cmd_blocked = NULL;
+
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
+ list_add(&cmd->list, (&instance->cmd_pool)->next);
+}
+
+/**
+ * megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ __megasas_return_cmd(instance, cmd);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
+
+/**
+* The following functions are defined for xscale
+* (deviceid : 1064R, PERC5) controllers
+*/
+
+/**
+ * megasas_enable_intr_xscale - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_xscale(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_xscale - Disables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_xscale(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0x1f;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_xscale - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+{
+ return readl(&(regs)->outbound_msg_0);
+}
+/**
+ * megasas_clear_intr_xscale - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_OB_INTR_STATUS_MASK)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+ if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ if (mfiStatus)
+ writel(status, &regs->outbound_intr_status);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_xscale - Sends command to the FW
+ * @instance :		Adapter soft state
+ * @frame_phys_addr :	Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_xscale(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
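+	/*
+	 * Frame addresses are frame-size aligned, so the >> 3 shift leaves
+	 * the low-order bits of the posted value free to carry frame_count.
+	 */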
+ writel((frame_phys_addr >> 3)|(frame_count),
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_xscale - For controller reset
+ * @instance:	Adapter soft state
+ * @regs:	MFI register set
+ */
+static int
+megasas_adp_reset_xscale(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ u32 i;
+ u32 pcidata;
+ writel(MFI_ADP_RESET, &regs->inbound_doorbell);
+
+ for (i = 0; i < 3; i++)
+ msleep(1000); /* sleep for 3 secs */
+ pcidata = 0;
+ pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
+ printk(KERN_NOTICE "pcidata = %x\n", pcidata);
+ if (pcidata & 0x2) {
+ printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
+ pcidata &= ~0x2;
+ pci_write_config_dword(instance->pdev,
+ MFI_1068_PCSR_OFFSET, pcidata);
+
+ for (i = 0; i < 2; i++)
+ msleep(1000); /* need to wait 2 secs again */
+
+ pcidata = 0;
+ pci_read_config_dword(instance->pdev,
+ MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
+ printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
+ if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
+ printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
+ pcidata = 0;
+ pci_write_config_dword(instance->pdev,
+ MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
+ }
+ }
+ return 0;
+}
+
+/**
+ * megasas_check_reset_xscale - For controller reset check
+ * @instance:	Adapter soft state
+ * @regs:	MFI register set
+ */
+static int
+megasas_check_reset_xscale(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+
+ if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ (le32_to_cpu(*instance->consumer) ==
+ MEGASAS_ADPRESET_INPROG_SIGN))
+ return 1;
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_xscale = {
+
+ .fire_cmd = megasas_fire_cmd_xscale,
+ .enable_intr = megasas_enable_intr_xscale,
+ .disable_intr = megasas_disable_intr_xscale,
+ .clear_intr = megasas_clear_intr_xscale,
+ .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
+ .adp_reset = megasas_adp_reset_xscale,
+ .check_reset = megasas_check_reset_xscale,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+* This is the end of the set of functions & definitions specific
+* to xscale (deviceid : 1064R, PERC5) controllers
+*/
+
+/**
+* The following functions are defined for ppc (deviceid : 0x60)
+* controllers
+*/
+
+/**
+ * megasas_enable_intr_ppc - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_ppc(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+ writel(~0x80000000, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_ppc - Disable interrupt
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_ppc(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_ppc - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_ppc - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
+{
+ u32 status, mfiStatus = 0;
+
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &regs->outbound_doorbell_clear);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_doorbell_clear);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_ppc - Sends command to the FW
+ * @instance :		Adapter soft state
+ * @frame_phys_addr :	Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_ppc(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
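+	/*
+	 * Bit 0 marks a valid post; frame_count rides in the low-order
+	 * bits freed up by the frame-aligned address.
+	 */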
+ writel((frame_phys_addr | (frame_count<<1))|1,
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_ppc - For controller reset check
+ * @instance:	Adapter soft state
+ * @regs:	MFI register set
+ */
+static int
+megasas_check_reset_ppc(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_ppc = {
+
+ .fire_cmd = megasas_fire_cmd_ppc,
+ .enable_intr = megasas_enable_intr_ppc,
+ .disable_intr = megasas_disable_intr_ppc,
+ .clear_intr = megasas_clear_intr_ppc,
+ .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
+ .adp_reset = megasas_adp_reset_xscale,
+ .check_reset = megasas_check_reset_ppc,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+ * megasas_enable_intr_skinny - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_skinny(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
+
+ writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_skinny - Disables interrupt
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_skinny(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_skinny - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_skinny - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK))
+		return 0;
+
+ /*
+	 * Check whether the firmware is in a FAULT state
+ */
+ if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
+ MFI_STATE_FAULT) {
+ mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ } else
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &regs->outbound_intr_status);
+
+ /*
+ * dummy read to flush PCI
+ */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_skinny - Sends command to the FW
+ * @instance :		Adapter soft state
+ * @frame_phys_addr :	Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_skinny(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel(upper_32_bits(frame_phys_addr),
+ &(regs)->inbound_high_queue_port);
+ writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+ &(regs)->inbound_low_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_skinny - For controller reset check
+ * @instance:	Adapter soft state
+ * @regs:	MFI register set
+ */
+static int
+megasas_check_reset_skinny(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_skinny = {
+
+ .fire_cmd = megasas_fire_cmd_skinny,
+ .enable_intr = megasas_enable_intr_skinny,
+ .disable_intr = megasas_disable_intr_skinny,
+ .clear_intr = megasas_clear_intr_skinny,
+ .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
+ .adp_reset = megasas_adp_reset_gen2,
+ .check_reset = megasas_check_reset_skinny,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+
+/**
+* The following functions are defined for gen2 (deviceid : 0x78, 0x79)
+* controllers
+*/
+
+/**
+ * megasas_enable_intr_gen2 - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_gen2(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+ /* write ~0x00000005 (4 & 1) to the intr mask*/
+ writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_gen2 - Disables interrupt
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_gen2(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_gen2 - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_gen2 - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+	if (status & MFI_INTR_FLAG_REPLY_MESSAGE)
+		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ if (mfiStatus)
+ writel(status, &regs->outbound_doorbell_clear);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+/**
+ * megasas_fire_cmd_gen2 - Sends command to the FW
+ * @instance :		Adapter soft state
+ * @frame_phys_addr :	Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_gen2(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel((frame_phys_addr | (frame_count<<1))|1,
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_gen2 - For controller reset
+ * @instance:	Adapter soft state
+ * @reg_set:	MFI register set
+ */
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set)
+{
+	u32 retry = 0;
+	u32 HostDiag;
+	u32 __iomem *seq_offset = &reg_set->seq_offset;
+	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
+
+ if (instance->instancet == &megasas_instance_template_skinny) {
+ seq_offset = &reg_set->fusion_seq_offset;
+ hostdiag_offset = &reg_set->fusion_host_diag;
+ }
+
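+	/*
+	 * Magic key sequence that write-enables the host diagnostic
+	 * register (akin to the MPT diag unlock sequence).
+	 */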
+ writel(0, seq_offset);
+ writel(4, seq_offset);
+ writel(0xb, seq_offset);
+ writel(2, seq_offset);
+ writel(7, seq_offset);
+ writel(0xd, seq_offset);
+
+ msleep(1000);
+
+ HostDiag = (u32)readl(hostdiag_offset);
+
+	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
+ msleep(100);
+ HostDiag = (u32)readl(hostdiag_offset);
+ printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
+ retry, HostDiag);
+
+ if (retry++ >= 100)
+ return 1;
+
+ }
+
+ printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
+
+ writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
+
+ ssleep(10);
+
+ HostDiag = (u32)readl(hostdiag_offset);
+	while (HostDiag & DIAG_RESET_ADAPTER) {
+ msleep(100);
+ HostDiag = (u32)readl(hostdiag_offset);
+ printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
+ retry, HostDiag);
+
+ if (retry++ >= 1000)
+ return 1;
+
+ }
+ return 0;
+}
+
+/**
+ * megasas_check_reset_gen2 - For controller reset check
+ * @instance:	Adapter soft state
+ * @regs:	MFI register set
+ */
+static int
+megasas_check_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+		return 1;
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_gen2 = {
+
+ .fire_cmd = megasas_fire_cmd_gen2,
+ .enable_intr = megasas_enable_intr_gen2,
+ .disable_intr = megasas_disable_intr_gen2,
+ .clear_intr = megasas_clear_intr_gen2,
+ .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
+ .adp_reset = megasas_adp_reset_gen2,
+ .check_reset = megasas_check_reset_gen2,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+* This is the end of the set of functions & definitions
+* specific to gen2 (deviceid : 0x78, 0x79) controllers
+*/
+
+/*
+ * Template added for TB (Fusion)
+ */
+extern struct megasas_instance_template megasas_instance_template_fusion;
+
+/**
+ * megasas_issue_polled - Issues a polling command
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ int seconds;
+
+ struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+ frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+ /*
+ * Issue the frame using inbound queue port
+ */
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ /*
+ * Wait for cmd_status to change
+ */
+ if (instance->requestorId)
+ seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+ else
+ seconds = MFI_POLL_TIMEOUT_SECS;
+ return wait_and_poll(instance, cmd, seconds);
+}
+
+/**
+ * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
+ * @instance: Adapter soft state
+ * @cmd: Command to be issued
+ * @timeout: Timeout in seconds
+ *
+ * This function waits on an event for the command to be returned from ISR.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ * Used to issue ioctl commands.
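+ * Returns 0 when the command completes, or 1 if the wait times out.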
+ */
+int
+megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, int timeout)
+{
+ int ret = 0;
+ cmd->cmd_status = ENODATA;
+
+ cmd->is_wait_event = 1;
+ instance->instancet->issue_dcmd(instance, cmd);
+ if (timeout) {
+ ret = wait_event_timeout(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret)
+ return 1;
+ } else
+ wait_event(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
+ return 0;
+}
+
+/**
+ * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
+ * @instance: Adapter soft state
+ * @cmd_to_abort: Previously issued cmd to be aborted
+ * @timeout: Timeout in seconds
+ *
+ * MFI firmware can abort a previously issued AEN command (automatic event
+ * notification). The megasas_issue_blocked_abort_cmd() issues such abort
+ * cmd and waits for return status.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ */
+static int
+megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_to_abort, int timeout)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_abort_frame *abort_fr;
+ int ret = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -1;
+
+ abort_fr = &cmd->frame->abort;
+
+ /*
+ * Prepare and issue the abort frame
+ */
+ abort_fr->cmd = MFI_CMD_ABORT;
+ abort_fr->cmd_status = 0xFF;
+ abort_fr->flags = cpu_to_le16(0);
+ abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+ abort_fr->abort_mfi_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+ abort_fr->abort_mfi_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
+
+ cmd->sync_cmd = 1;
+ cmd->cmd_status = ENODATA;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ if (timeout) {
+ ret = wait_event_timeout(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret) {
+			dev_err(&instance->pdev->dev, "Command timed out"
+				" from %s\n", __func__);
+ return 1;
+ }
+ } else
+ wait_event(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
+ cmd->sync_cmd = 0;
+
+ megasas_return_cmd(instance, cmd);
+ return 0;
+}
+
+/**
+ * megasas_make_sgl32 - Prepares 32-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
+ }
+ }
+ return sge_count;
+}
+
+/**
+ * megasas_make_sgl64 - Prepares 64-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
+ }
+ }
+ return sge_count;
+}
+
+/**
+ * megasas_make_sgl_skinny - Prepares IEEE SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl_skinny(struct megasas_instance *instance,
+ struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge_skinny[i].length =
+ cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge_skinny[i].phys_addr =
+ cpu_to_le64(sg_dma_address(os_sgl));
+ mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
+ }
+ }
+ return sge_count;
+}
+
+/**
+ * megasas_get_frame_count - Computes the number of frames
+ * @instance :		Adapter soft state
+ * @sge_count :		number of sg elements
+ * @frame_type :	type of frame - io or pthru frame
+ *
+ * Returns the number of frames required for the given number of SGEs
+ * (sge_count)
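+ *
+ * e.g. assuming 16-byte IEEE SGEs and 64-byte MFI frames, an ldio with
+ * sge_count = 9 gives num_cnt = 8, i.e. 128 bytes of overflow SGEs and
+ * two extra frames, so frame_count = 3.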
+ */
+
+static u32 megasas_get_frame_count(struct megasas_instance *instance,
+ u8 sge_count, u8 frame_type)
+{
+ int num_cnt;
+ int sge_bytes;
+ u32 sge_sz;
+	u32 frame_count = 0;
+
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (instance->flag_ieee) {
+ sge_sz = sizeof(struct megasas_sge_skinny);
+ }
+
+ /*
+ * Main frame can contain 2 SGEs for 64-bit SGLs and
+ * 3 SGEs for 32-bit SGLs for ldio &
+	 * 1 SGE for 64-bit SGLs and
+	 * 2 SGEs for 32-bit SGLs for a pthru frame
+ */
+ if (unlikely(frame_type == PTHRU_FRAME)) {
+ if (instance->flag_ieee == 1) {
+ num_cnt = sge_count - 1;
+ } else if (IS_DMA64)
+ num_cnt = sge_count - 1;
+ else
+ num_cnt = sge_count - 2;
+ } else {
+ if (instance->flag_ieee == 1) {
+ num_cnt = sge_count - 1;
+ } else if (IS_DMA64)
+ num_cnt = sge_count - 2;
+ else
+ num_cnt = sge_count - 3;
+ }
+
+	if (num_cnt > 0) {
+		sge_bytes = sge_sz * num_cnt;
+
+		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
+			((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0);
+	}
+	/* Main frame */
+	frame_count += 1;
+
+ if (frame_count > 7)
+ frame_count = 8;
+ return frame_count;
+}
+
+/**
+ * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typically pass-through
+ * commands to the devices.
+ */
+static int
+megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 is_logical;
+ u32 device_id;
+ u16 flags = 0;
+ struct megasas_pthru_frame *pthru;
+
+ is_logical = MEGASAS_IS_LOGICAL(scp);
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ pthru = (struct megasas_pthru_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+ else if (scp->sc_data_direction == PCI_DMA_NONE)
+ flags = MFI_FRAME_DIR_NONE;
+
+ if (instance->flag_ieee == 1) {
+ flags |= MFI_FRAME_IEEE;
+ }
+
+ /*
+ * Prepare the DCDB frame
+ */
+ pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
+ pthru->cmd_status = 0x0;
+ pthru->scsi_status = 0x0;
+ pthru->target_id = device_id;
+ pthru->lun = scp->device->lun;
+ pthru->cdb_len = scp->cmd_len;
+ pthru->timeout = 0;
+ pthru->pad_0 = 0;
+ pthru->flags = cpu_to_le16(flags);
+ pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
+
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ /*
+ * If the command is for the tape device, set the
+ * pthru timeout to the os layer timeout value.
+ */
+ if (scp->device->type == TYPE_TAPE) {
+ if ((scp->request->timeout / HZ) > 0xFFFF)
+ pthru->timeout = 0xFFFF;
+ else
+ pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+ }
+
+ /*
+ * Construct SGL
+ */
+ if (instance->flag_ieee == 1) {
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
+ &pthru->sgl);
+ } else if (IS_DMA64) {
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ pthru->sge_count = megasas_make_sgl64(instance, scp,
+ &pthru->sgl);
+ } else
+ pthru->sge_count = megasas_make_sgl32(instance, scp,
+ &pthru->sgl);
+
+ if (pthru->sge_count > instance->max_num_sge) {
+		printk(KERN_ERR "megasas: DCDB too many SGEs NUM=%x\n",
+ pthru->sge_count);
+ return 0;
+ }
+
+ /*
+ * Sense info specific
+ */
+ pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
+ pthru->sense_buf_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+ pthru->sense_buf_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
+
+ /*
+ * Compute the total number of frames this command consumes. FW uses
+ * this number to pull sufficient number of frames from host memory.
+ */
+ cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
+ PTHRU_FRAME);
+
+ return cmd->frame_count;
+}
+
+/**
+ * megasas_build_ldio - Prepares IOs to logical devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
+ */
+static int
+megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 device_id;
+ u8 sc = scp->cmnd[0];
+ u16 flags = 0;
+ struct megasas_io_frame *ldio;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ ldio = (struct megasas_io_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+
+ if (instance->flag_ieee == 1) {
+ flags |= MFI_FRAME_IEEE;
+ }
+
+ /*
+ * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
+ */
+ ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
+ ldio->cmd_status = 0x0;
+ ldio->scsi_status = 0x0;
+ ldio->target_id = device_id;
+ ldio->timeout = 0;
+ ldio->reserved_0 = 0;
+ ldio->pad_0 = 0;
+ ldio->flags = cpu_to_le16(flags);
+ ldio->start_lba_hi = 0;
+ ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) |
+ (u32) scp->cmnd[3]);
+
+ ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8));
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+ }
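+	/*
+	 * e.g. a READ(10) with cmnd[2..5] = 00 12 34 56 and
+	 * cmnd[7..8] = 00 08 yields start_lba_lo = 0x123456 and
+	 * lba_count = 8
+	 */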
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
+
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+ }
+
+ /*
+ * 16-byte READ(0x88) or WRITE(0x8A) cdb
+ */
+ else if (scp->cmd_len == 16) {
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) |
+ (u32) scp->cmnd[13]);
+
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
+
+ ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+
+ }
+
+ /*
+ * Construct SGL
+ */
+ if (instance->flag_ieee) {
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
+ &ldio->sgl);
+ } else if (IS_DMA64) {
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
+ } else
+ ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+
+ if (ldio->sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+ ldio->sge_count);
+ return 0;
+ }
+
+ /*
+ * Sense info specific
+ */
+ ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
+ ldio->sense_buf_phys_addr_hi = 0;
+ ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
+
+ /*
+ * Compute the total number of frames this command consumes. FW uses
+ * this number to pull sufficient number of frames from host memory.
+ */
+ cmd->frame_count = megasas_get_frame_count(instance,
+ ldio->sge_count, IO_FRAME);
+
+ return cmd->frame_count;
+}
+
+/**
+ * megasas_cmd_type - Checks if the cmd is for a logical drive or sysPD
+ * and whether it's RW or non-RW
+ * @cmd: SCSI command
+ *
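+ * e.g. READ_10 on a logical-drive channel maps to READ_WRITE_LDIO, while
+ * an INQUIRY to a system PD maps to NON_READ_WRITE_SYSPDIO.
+ *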
+ */
+inline int megasas_cmd_type(struct scsi_cmnd *cmd)
+{
+ int ret;
+
+ switch (cmd->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case READ_16:
+ case WRITE_16:
+ ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
+ break;
+ default:
+ ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+ NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
+ }
+ return ret;
+}
+
+/**
+ * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
+ * in FW
+ * @instance: Adapter soft state
+ */
+static inline void
+megasas_dump_pending_frames(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+	int i, n;
+ union megasas_sgl *mfi_sgl;
+ struct megasas_io_frame *ldio;
+ struct megasas_pthru_frame *pthru;
+ u32 sgcount;
+ u32 max_cmd = instance->max_fw_cmds;
+
+ printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
+ printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
+ if (IS_DMA64)
+ printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
+ else
+ printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
+
+ printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+		if (!cmd->scmd)
+ continue;
+ printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
+ if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
+ ldio = (struct megasas_io_frame *)cmd->frame;
+ mfi_sgl = &ldio->sgl;
+ sgcount = ldio->sge_count;
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+ " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+ le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+ le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
+ }
+ else {
+ pthru = (struct megasas_pthru_frame *) cmd->frame;
+ mfi_sgl = &pthru->sgl;
+ sgcount = pthru->sge_count;
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+ "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+ pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+ le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
+ }
+		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
+			for (n = 0; n < sgcount; n++) {
+ if (IS_DMA64)
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+ le32_to_cpu(mfi_sgl->sge64[n].length),
+ le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
+ else
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+ le32_to_cpu(mfi_sgl->sge32[n].length),
+ le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
+ }
+ }
+ printk(KERN_ERR "\n");
+ } /*for max_cmd*/
+ printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+		if (cmd->sync_cmd == 1) {
+ printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
+ }
+ }
+ printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
+}
+
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd *cmd;
+ u32 frame_count;
+
+ cmd = megasas_get_cmd(instance);
+ if (!cmd)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Logical drive command
+ */
+ if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
+ frame_count = megasas_build_ldio(instance, scmd, cmd);
+ else
+ frame_count = megasas_build_dcdb(instance, scmd, cmd);
+
+ if (!frame_count)
+ goto out_return_cmd;
+
+ cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *)cmd;
+
+ /*
+ * Issue the command to the FW
+ */
+ atomic_inc(&instance->fw_outstanding);
+
+ instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
+ cmd->frame_count-1, instance->reg_set);
+
+ return 0;
+out_return_cmd:
+ megasas_return_cmd(instance, cmd);
+ return 1;
+}
+
+
+/**
+ * megasas_queue_command - Queue entry point
+ * @shost: SCSI host the command is queued to
+ * @scmd: SCSI command to be queued
+ */
+static int
+megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ unsigned long flags;
+
+ instance = (struct megasas_instance *)
+ scmd->device->host->hostdata;
+
+ if (instance->unload == 1) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (instance->issuepend_done == 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+
+ /* Check for an mpio path and adjust behavior */
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (megasas_check_mpio_paths(instance, scmd) ==
+ (DID_RESET << 16)) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ } else {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ }
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ scmd->result = 0;
+
+ if (MEGASAS_IS_LOGICAL(scmd) &&
+ (scmd->device->id >= instance->fw_supported_vd_count ||
+ scmd->device->lun)) {
+ scmd->result = DID_BAD_TARGET << 16;
+ goto out_done;
+ }
+
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ /*
+ * FW takes care of flush cache on its own
+ * No need to send it down
+ */
+ scmd->result = DID_OK << 16;
+ goto out_done;
+ default:
+ break;
+ }
+
+ if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
+ printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ return 0;
+
+ out_done:
+ scmd->scsi_done(scmd);
+ return 0;
+}
+
+static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+{
+ int i;
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+
+ if ((megasas_mgmt_info.instance[i]) &&
+ (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+ return megasas_mgmt_info.instance[i];
+ }
+
+ return NULL;
+}
+
+static int megasas_slave_configure(struct scsi_device *sdev)
+{
+ /*
+ * The RAID firmware may require extended timeouts.
+ */
+ blk_queue_rq_timeout(sdev->request_queue,
+ MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+
+ return 0;
+}
+
+static int megasas_slave_alloc(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+	struct megasas_instance *instance;
+ instance = megasas_lookup_instance(sdev->host->host_no);
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ /*
+ * Open the OS scan to the SYSTEM PD
+ */
+ pd_index =
+ (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ return 0;
+ }
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/*
+ * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
+ * kill adapter
+ * @instance:	Adapter soft state
+ */
+void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
+{
+ int i;
+ struct megasas_cmd *cmd_mfi;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ /* Find all outstanding ioctls */
+ if (fusion) {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ if (cmd_mfi->sync_cmd &&
+ cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ megasas_complete_cmd(instance,
+ cmd_mfi, DID_OK);
+ }
+ }
+ } else {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd_mfi = instance->cmd_list[i];
+ if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
+ MFI_CMD_ABORT)
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ }
+ }
+}
+
+
+void megaraid_sas_kill_hba(struct megasas_instance *instance)
+{
+ /* Set critical error to block I/O & ioctls in case caller didn't */
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+	/* Wait 1 second to ensure IOs or ioctls still being built have posted */
+ msleep(1000);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->doorbell);
+ /* Flush */
+ readl(&instance->reg_set->doorbell);
+ if (instance->mpio && instance->requestorId)
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ } else {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->inbound_doorbell);
+ }
+ /* Complete outstanding ioctls when adapter is killed */
+ megasas_complete_outstanding_ioctls(instance);
+}
+
+/**
+ * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
+ * restored to max value
+ * @instance: Adapter soft state
+ *
+ */
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
+{
+ unsigned long flags;
+
+ if (instance->flag & MEGASAS_FW_BUSY
+ && time_after(jiffies, instance->last_time + 5 * HZ)
+ && atomic_read(&instance->fw_outstanding) <
+ instance->throttlequeuedepth + 1) {
+
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ instance->flag &= ~MEGASAS_FW_BUSY;
+
+ instance->host->can_queue = instance->max_scsi_cmds;
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+}
+
+/**
+ * megasas_complete_cmd_dpc - Completes outstanding commands from the reply queue
+ * @instance_addr: Address of adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+{
+ u32 producer;
+ u32 consumer;
+ u32 context;
+ struct megasas_cmd *cmd;
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+ unsigned long flags;
+
+ /* If we have already declared the adapter dead, do not complete cmds */
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ spin_lock_irqsave(&instance->completion_lock, flags);
+
+ producer = le32_to_cpu(*instance->producer);
+ consumer = le32_to_cpu(*instance->consumer);
+
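+ /*
+ * The reply queue is a circular buffer of max_fw_cmds + 1 slots;
+ * each slot carries a 32-bit context (the command index). Drain
+ * entries until the consumer catches up with the producer.
+ */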
+ while (consumer != producer) {
+ context = le32_to_cpu(instance->reply_queue[consumer]);
+ if (context >= instance->max_fw_cmds) {
+ printk(KERN_ERR "Unexpected context value %x\n",
+ context);
+ BUG();
+ }
+
+ cmd = instance->cmd_list[context];
+
+ megasas_complete_cmd(instance, cmd, DID_OK);
+
+ consumer++;
+ if (consumer == (instance->max_fw_cmds + 1)) {
+ consumer = 0;
+ }
+ }
+
+ *instance->consumer = cpu_to_le32(producer);
+
+ spin_unlock_irqrestore(&instance->completion_lock, flags);
+
+ /*
+ * Check if we can restore can_queue
+ */
+ megasas_check_and_restore_queue_depth(instance);
+}
+
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance: Adapter soft state
+ * @timer: timer object to be initialized
+ * @fn: timer function
+ * @interval: time interval between timer function calls
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval)
+{
+ init_timer(timer);
+ timer->expires = jiffies + interval;
+ timer->data = (unsigned long)instance;
+ timer->function = fn;
+ add_timer(timer);
+}
+
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
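+/**
+ * megasas_do_ocr - Initiate an online controller reset
+ * @instance: Adapter soft state
+ */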
+void megasas_do_ocr(struct megasas_instance *instance)
+{
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+ *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+ }
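+ /*
+ * On these first-generation controllers the ADPRESET signature in
+ * the consumer slot marks an online controller reset in progress.
+ */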
+ instance->instancet->disable_intr(instance);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->issuepend_done = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+ megasas_internal_reset_defer_cmds(instance);
+ process_fw_state_change_wq(&instance->work_init);
+}
+
+static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+ dma_addr_t new_affiliation_111_h;
+ int ld, retval = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:"
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation_111) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ memset(instance->vf_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else {
+ new_affiliation_111 =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
+ if (!new_affiliation_111) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ memset(new_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+
+ dcmd->sgl.sge32[0].length =
+ sizeof(struct MR_LD_VF_AFFILIATION_111);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ thisVf = new_affiliation_111->thisVf;
+ for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+ if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
+ new_affiliation_111->map[ld].policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation_111,
+ new_affiliation_111,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ retval = 1;
+ goto out;
+ }
+ }
+out:
+ if (new_affiliation_111) {
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ new_affiliation_111,
+ new_affiliation_111_h);
+ }
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+ struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+ dma_addr_t new_affiliation_h;
+ int i, j, retval = 0, found = 0, doscan = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ else {
+ new_affiliation =
+ pci_alloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
+ if (!new_affiliation) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+
+ dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ if (!new_affiliation->ldCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for passive path for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+ thisVf = new_affiliation->thisVf;
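+ /*
+ * Compare the maps in both directions: the first pass catches
+ * LDs that changed policy or are newly visible to this VF; the
+ * second pass catches LDs that disappeared from the new map.
+ */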
+ for (i = 0 ; i < new_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0; j < instance->vf_affiliation->ldCount;
+ j++) {
+ if (newmap->ref.targetId ==
+ savedmap->ref.targetId) {
+ found = 1;
+ if (newmap->policy[thisVf] !=
+ savedmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
+ }
+ if (!found && newmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap + newmap->size);
+ }
+
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+
+ for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0 ; j < new_affiliation->ldCount; j++) {
+ if (savedmap->ref.targetId ==
+ newmap->ref.targetId) {
+ found = 1;
+ if (savedmap->policy[thisVf] !=
+ newmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
+ }
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap +
+ newmap->size);
+ }
+ if (!found && savedmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
+ }
+ }
+out:
+ if (doscan) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ memcpy(instance->vf_affiliation, new_affiliation,
+ new_affiliation->size);
+ retval = 1;
+ }
+
+ if (new_affiliation)
+ pci_free_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ new_affiliation, new_affiliation_h);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial)
+{
+ int retval;
+
+ if (instance->PlasmaFW111)
+ retval = megasas_get_ld_vf_affiliation_111(instance, initial);
+ else
+ retval = megasas_get_ld_vf_affiliation_12(instance, initial);
+ return retval;
+}
+
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ int retval = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (initial) {
+ instance->hb_host_mem =
+ pci_zalloc_consistent(instance->pdev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h);
+ if (!instance->hb_host_mem) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
+ " memory for heartbeat host memory for "
+ "scsi%d.\n", instance->host->host_no);
+ retval = -ENOMEM;
+ goto out;
+ }
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
+ dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+ instance->host->host_no);
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ retval = 0;
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD timed out for scsi%d\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
+ dcmd->cmd_status,
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+out:
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ mod_timer(&instance->sriov_heartbeat_timer,
+ jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+ "completed for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+}
+
+/**
+ * megasas_wait_for_outstanding - Wait for all outstanding cmds
+ * @instance: Adapter soft state
+ *
+ * This function waits for up to resetwaittime seconds for the FW to
+ * complete all its outstanding commands. It returns an error if one or
+ * more IOs are still pending after this period, in which case it also
+ * marks the controller dead.
+ */
+static int megasas_wait_for_outstanding(struct megasas_instance *instance)
+{
+ int i;
+ u32 reset_index;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+ u8 adprecovery;
+ unsigned long flags;
+ struct list_head clist_local;
+ struct megasas_cmd *reset_cmd;
+ u32 fw_state;
+ u8 kill_adapter_flag;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ adprecovery = instance->adprecovery;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+
+ INIT_LIST_HEAD(&clist_local);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ list_splice_init(&instance->internal_reset_pending_q,
+ &clist_local);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
+ for (i = 0; i < wait_time; i++) {
+ msleep(1000);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ adprecovery = instance->adprecovery;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ }
+
+ if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return FAILED;
+ }
+
+ reset_index = 0;
+ while (!list_empty(&clist_local)) {
+ reset_cmd = list_entry((&clist_local)->next,
+ struct megasas_cmd, list);
+ list_del_init(&reset_cmd->list);
+ if (reset_cmd->scmd) {
+ reset_cmd->scmd->result = DID_RESET << 16;
+ printk(KERN_NOTICE "%d:%p reset [%02x]\n",
+ reset_index, reset_cmd,
+ reset_cmd->scmd->cmnd[0]);
+
+ reset_cmd->scmd->scsi_done(reset_cmd->scmd);
+ megasas_return_cmd(instance, reset_cmd);
+ } else if (reset_cmd->sync_cmd) {
+ printk(KERN_NOTICE "megasas:%p synch cmds"
+ "reset queue\n",
+ reset_cmd);
+
+ reset_cmd->cmd_status = ENODATA;
+ instance->instancet->fire_cmd(instance,
+ reset_cmd->frame_phys_addr,
+ 0, instance->reg_set);
+ } else {
+ printk(KERN_NOTICE "megasas: %p unexpected"
+ "cmds lst\n",
+ reset_cmd);
+ }
+ reset_index++;
+ }
+
+ return SUCCESS;
+ }
+
+ for (i = 0; i < resetwaittime; i++) {
+
+ int outstanding = atomic_read(&instance->fw_outstanding);
+
+ if (!outstanding)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ "commands to complete\n",i,outstanding);
+ /*
+ * Call cmd completion routine. Cmd to be
+ * be completed directly without depending on isr.
+ */
+ megasas_complete_cmd_dpc((unsigned long)instance);
+ }
+
+ msleep(1000);
+ }
+
+ i = 0;
+ kill_adapter_flag = 0;
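+ /*
+ * kill_adapter_flag: 0 = no OCR attempted, 1 = OCR issued for a
+ * FW fault, 2 = FW still faulted after three OCR attempts, so
+ * the adapter must be taken offline.
+ */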
+ do {
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if ((fw_state == MFI_STATE_FAULT) &&
+ (instance->disableOnlineCtrlReset == 0)) {
+ if (i == 3) {
+ kill_adapter_flag = 2;
+ break;
+ }
+ megasas_do_ocr(instance);
+ kill_adapter_flag = 1;
+
+ /* wait 1 sec to let the FW finish the pending cmds */
+ msleep(1000);
+ }
+ i++;
+ } while (i <= 3);
+
+ if (atomic_read(&instance->fw_outstanding) &&
+ !kill_adapter_flag) {
+ if (instance->disableOnlineCtrlReset == 0) {
+
+ megasas_do_ocr(instance);
+
+ /* wait up to wait_time secs to let the FW finish the pending cmds */
+ for (i = 0; i < wait_time; i++) {
+ int outstanding =
+ atomic_read(&instance->fw_outstanding);
+ if (!outstanding)
+ return SUCCESS;
+ msleep(1000);
+ }
+ }
+ }
+
+ if (atomic_read(&instance->fw_outstanding) ||
+ (kill_adapter_flag == 2)) {
+ printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
+ /*
+ * Send signal to FW to stop processing any pending cmds.
+ * The controller will be taken offline by the OS now.
+ */
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->doorbell);
+ } else {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->inbound_doorbell);
+ }
+ megasas_dump_pending_frames(instance);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return FAILED;
+ }
+
+ printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
+
+ return SUCCESS;
+}
+
+/**
+ * megasas_generic_reset - Generic reset routine
+ * @scmd: Mid-layer SCSI command
+ *
+ * This routine implements a generic reset handler for device, bus and host
+ * reset requests. Device, bus and host specific reset handlers can use this
+ * function after they do their specific tasks.
+ */
+static int megasas_generic_reset(struct scsi_cmnd *scmd)
+{
+ int ret_val;
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
+ scmd->cmnd[0], scmd->retries);
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ printk(KERN_ERR "megasas: cannot recover from previous reset "
+ "failures\n");
+ return FAILED;
+ }
+
+ ret_val = megasas_wait_for_outstanding(instance);
+ if (ret_val == SUCCESS)
+ printk(KERN_NOTICE "megasas: reset successful \n");
+ else
+ printk(KERN_ERR "megasas: failed to do reset\n");
+
+ return ret_val;
+}
+
+/**
+ * megasas_reset_timer - quiesce the adapter if required
+ * @scmd: scsi cmnd
+ *
+ * Sets the FW busy flag and reduces the host->can_queue if the
+ * cmd has not been completed within the timeout period.
+ */
+static enum blk_eh_timer_return
+megasas_reset_timer(struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ unsigned long flags;
+
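+ /*
+ * If the command has already been pending for more than twice the
+ * default timeout, stop resetting the timer and let the SCSI EH
+ * take over.
+ */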
+ if (time_after(jiffies, scmd->jiffies_at_alloc +
+ (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+ return BLK_EH_NOT_HANDLED;
+ }
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ if (!(instance->flag & MEGASAS_FW_BUSY)) {
+ /* FW looks busy; set the busy flag and throttle IO */
+ spin_lock_irqsave(instance->host->host_lock, flags);
+
+ instance->host->can_queue = instance->throttlequeuedepth;
+ instance->last_time = jiffies;
+ instance->flag |= MEGASAS_FW_BUSY;
+
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+ return BLK_EH_RESET_TIMER;
+}
+
+/**
+ * megasas_reset_device - Device reset handler entry point
+ */
+static int megasas_reset_device(struct scsi_cmnd *scmd)
+{
+ int ret;
+
+ /*
+ * First wait for all commands to complete
+ */
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_reset_bus_host - Bus & host reset handler entry point
+ */
+static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
+{
+ int ret;
+ struct megasas_instance *instance;
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ /*
+ * First wait for all commands to complete
+ */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ ret = megasas_reset_fusion(scmd->device->host, 1);
+ else
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_bios_param - Returns disk geometry for a disk
+ * @sdev: device handle
+ * @bdev: block device
+ * @capacity: drive capacity
+ * @geom: geometry parameters
+ */
+static int
+megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ unsigned long tmp;
+ /* Default heads (64) & sectors (32) */
+ heads = 64;
+ sectors = 32;
+
+ tmp = heads * sectors;
+ cylinders = capacity;
+
+ sector_div(cylinders, tmp);
+
+ /*
+ * Handle extended translation for logical drives > 1 GiB
+ * (0x200000 sectors at 512 bytes per sector)
+ */
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ tmp = heads*sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+static void megasas_aen_polling(struct work_struct *work);
+
+/**
+ * megasas_service_aen - Processes an event notification
+ * @instance: Adapter soft state
+ * @cmd: AEN command completed by the ISR
+ *
+ * For AEN, driver sends a command down to FW that is held by the FW till an
+ * event occurs. When an event of interest occurs, FW completes the command
+ * that it was previously holding.
+ *
+ * This routine sends a SIGIO signal to processes that have registered with the
+ * driver for AEN.
+ */
+static void
+megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+ /*
+ * Don't signal the app if this is just an aborted, previously registered AEN
+ */
+ if ((!cmd->abort_aen) && (instance->unload == 0)) {
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ megasas_poll_wait_aen = 1;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ wake_up(&megasas_poll_wait);
+ kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
+ }
+ else
+ cmd->abort_aen = 0;
+
+ instance->aen_cmd = NULL;
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ if ((instance->unload == 0) &&
+ (instance->issuepend_done == 1)) {
+ struct megasas_aen_event *ev;
+ ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+ if (!ev) {
+ printk(KERN_ERR "megasas_service_aen: out of memory\n");
+ } else {
+ ev->instance = instance;
+ instance->ev = ev;
+ INIT_DELAYED_WORK(&ev->hotplug_work,
+ megasas_aen_polling);
+ schedule_delayed_work(&ev->hotplug_work, 0);
+ }
+ }
+}
+
+static ssize_t
+megasas_fw_crash_buffer_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ instance->fw_crash_buffer_offset = val;
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_buffer_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ u32 size;
+ unsigned long buff_addr;
+ unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long src_addr;
+ unsigned long flags;
+ u32 buff_offset;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ buff_offset = instance->fw_crash_buffer_offset;
+ if (!instance->crash_dump_buf &&
+ !((instance->fw_crash_state == AVAILABLE) ||
+ (instance->fw_crash_state == COPYING))) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump is not available\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return -EINVAL;
+ }
+
+ buff_addr = (unsigned long) buf;
+
+ if (buff_offset >
+ (instance->fw_crash_buffer_size * dmachunk)) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump offset is out of range\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return 0;
+ }
+
+ size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
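+ /* A sysfs read returns at most one page, so cap each chunk below PAGE_SIZE */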
+
+ src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+ (buff_offset % dmachunk);
+ memcpy(buf, (void *)src_addr, size);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+
+ return size;
+}
+
+static ssize_t
+megasas_fw_crash_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
+ ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
+}
+
+static ssize_t
+megasas_fw_crash_state_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ if (val <= AVAILABLE || val > COPY_ERROR) {
+ dev_err(&instance->pdev->dev, "application set an invalid "
+ "firmware crash state\n");
+ return -EINVAL;
+ }
+
+ instance->fw_crash_state = val;
+
+ if ((val == COPIED) || (val == COPY_ERROR)) {
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ megasas_free_host_crash_buffer(instance);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ if (val == COPY_ERROR)
+ dev_info(&instance->pdev->dev, "application failed to "
+ "copy Firmware crash dump\n");
+ else
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "copied successfully\n");
+ }
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_state_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
+}
+
+static ssize_t
+megasas_page_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
+}
+
+static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
+static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
+ megasas_fw_crash_buffer_size_show, NULL);
+static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_state_show, megasas_fw_crash_state_store);
+static DEVICE_ATTR(page_size, S_IRUGO,
+ megasas_page_size_show, NULL);
+
+struct device_attribute *megaraid_host_attrs[] = {
+ &dev_attr_fw_crash_buffer_size,
+ &dev_attr_fw_crash_buffer,
+ &dev_attr_fw_crash_state,
+ &dev_attr_page_size,
+ NULL,
+};
+
+/*
+ * Scsi host template for megaraid_sas driver
+ */
+static struct scsi_host_template megasas_template = {
+
+ .module = THIS_MODULE,
+ .name = "LSI SAS based MegaRAID driver",
+ .proc_name = "megaraid_sas",
+ .slave_configure = megasas_slave_configure,
+ .slave_alloc = megasas_slave_alloc,
+ .queuecommand = megasas_queue_command,
+ .eh_device_reset_handler = megasas_reset_device,
+ .eh_bus_reset_handler = megasas_reset_bus_host,
+ .eh_host_reset_handler = megasas_reset_bus_host,
+ .eh_timed_out = megasas_reset_timer,
+ .shost_attrs = megaraid_host_attrs,
+ .bios_param = megasas_bios_param,
+ .use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = scsi_change_queue_depth,
+ .no_write_same = 1,
+};
+
+/**
+ * megasas_complete_int_cmd - Completes an internal command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ *
+ * The megasas_issue_blocked_cmd() function waits for a command to complete
+ * after it issues a command. This function wakes up that waiting routine by
+ * calling wake_up() on the wait queue.
+ */
+static void
+megasas_complete_int_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ cmd->cmd_status = cmd->frame->io.cmd_status;
+
+ if (cmd->cmd_status == ENODATA) {
+ cmd->cmd_status = 0;
+ }
+ wake_up(&instance->int_cmd_wait_q);
+}
+
+/**
+ * megasas_complete_abort - Completes aborting a command
+ * @instance: Adapter soft state
+ * @cmd: Cmd that was issued to abort another cmd
+ *
+ * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
+ * after it issues an abort on a previously issued command. This function
+ * wakes up all functions waiting on the same wait queue.
+ */
+static void
+megasas_complete_abort(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ cmd->cmd_status = 0;
+ wake_up(&instance->abort_cmd_wait_q);
+ }
+
+ return;
+}
+
+/**
+ * megasas_complete_cmd - Completes a command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ * @alt_status: If non-zero, use this value as status to
+ * SCSI mid-layer instead of the value returned
+ * by the FW. This should be used if caller wants
+ * an alternate status (as in the case of aborted
+ * commands)
+ */
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status)
+{
+ int exception = 0;
+ struct megasas_header *hdr = &cmd->frame->hdr;
+ unsigned long flags;
+ struct fusion_context *fusion = instance->ctrl_context;
+ u32 opcode;
+
+ /* flag for the retry reset */
+ cmd->retry_for_fw_reset = 0;
+
+ if (cmd->scmd)
+ cmd->scmd->SCp.ptr = NULL;
+
+ switch (hdr->cmd) {
+ case MFI_CMD_INVALID:
+ /*
+ * Some older 1068 controller FW may keep a pending
+ * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
+ * when booting the kdump kernel. Ignore this command to
+ * prevent a kernel panic on shutdown of the kdump kernel.
+ */
+ printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
+ "completed.\n");
+ printk(KERN_WARNING "megaraid_sas: If you have a controller "
+ "other than PERC5, please upgrade your firmware.\n");
+ break;
+ case MFI_CMD_PD_SCSI_IO:
+ case MFI_CMD_LD_SCSI_IO:
+
+ /*
+ * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+ * issued either through an IO path or an IOCTL path. If it
+ * was via IOCTL, we will send it to internal completion.
+ */
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+ }
+
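+ /* fall through - IO-path SCSI commands complete like LD read/write */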
+ case MFI_CMD_LD_READ:
+ case MFI_CMD_LD_WRITE:
+
+ if (alt_status) {
+ cmd->scmd->result = alt_status << 16;
+ exception = 1;
+ }
+
+ if (exception) {
+
+ atomic_dec(&instance->fw_outstanding);
+
+ scsi_dma_unmap(cmd->scmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+ }
+
+ switch (hdr->cmd_status) {
+
+ case MFI_STAT_OK:
+ cmd->scmd->result = DID_OK << 16;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ cmd->scmd->result =
+ (DID_ERROR << 16) | hdr->scsi_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
+
+ if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
+ memset(cmd->scmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ hdr->sense_len);
+
+ cmd->scmd->result |= DRIVER_SENSE << 24;
+ }
+
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+ hdr->cmd_status);
+ cmd->scmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ atomic_dec(&instance->fw_outstanding);
+
+ scsi_dma_unmap(cmd->scmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_SMP:
+ case MFI_CMD_STP:
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+ /* Check for LD map update */
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (cmd->frame->dcmd.mbox.b[1] == 1)) {
+ fusion->fast_path_io = 0;
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ if (cmd->frame->hdr.cmd_status != 0) {
+ if (cmd->frame->hdr.cmd_status !=
+ MFI_STAT_NOT_FOUND)
+ printk(KERN_WARNING "megasas: map sync"
+ "failed, status = 0x%x.\n",
+ cmd->frame->hdr.cmd_status);
+ else {
+ megasas_return_mfi_mpt_pthr(instance,
+ cmd, cmd->mpt_pthr_cmd_blocked);
+ spin_unlock_irqrestore(
+ instance->host->host_lock,
+ flags);
+ break;
+ }
+ } else
+ instance->map_id++;
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+
+ /*
+ * Enable fast path IO only if the new map
+ * validates; until then all IOs go through
+ * the LD IO path.
+ */
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+ megasas_sync_map_info(instance);
+ spin_unlock_irqrestore(instance->host->host_lock,
+ flags);
+ break;
+ }
+ if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+ opcode == MR_DCMD_CTRL_EVENT_GET) {
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ megasas_poll_wait_aen = 0;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ }
+
+ /*
+ * See if got an event notification
+ */
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ megasas_service_aen(instance, cmd);
+ else
+ megasas_complete_int_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_ABORT:
+ /*
+ * Cmd issued to abort another cmd returned
+ */
+ megasas_complete_abort(instance, cmd);
+ break;
+
+ default:
+ printk("megasas: Unknown command completed! [0x%X]\n",
+ hdr->cmd);
+ break;
+ }
+}
+
+/**
+ * megasas_issue_pending_cmds_again - issue all pending cmds
+ * in FW again because of the fw reset
+ * @instance: Adapter soft state
+ */
+static inline void
+megasas_issue_pending_cmds_again(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct list_head clist_local;
+ union megasas_evt_class_locale class_locale;
+ unsigned long flags;
+ u32 seq_num;
+
+ INIT_LIST_HEAD(&clist_local);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ list_splice_init(&instance->internal_reset_pending_q, &clist_local);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
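+ /*
+ * The pending list was spliced out under the lock above, so each
+ * command can now be re-fired without holding it.
+ */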
+ while (!list_empty(&clist_local)) {
+ cmd = list_entry((&clist_local)->next,
+ struct megasas_cmd, list);
+ list_del_init(&cmd->list);
+
+ if (cmd->sync_cmd || cmd->scmd) {
+ printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
+ "detected to be pending while HBA reset.\n",
+ cmd, cmd->scmd, cmd->sync_cmd);
+
+ cmd->retry_for_fw_reset++;
+
+ if (cmd->retry_for_fw_reset == 3) {
+ printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
+ "was tried multiple times during reset."
+ "Shutting down the HBA\n",
+ cmd, cmd->scmd, cmd->sync_cmd);
+ instance->instancet->disable_intr(instance);
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ megaraid_sas_kill_hba(instance);
+ return;
+ }
+ }
+
+ if (cmd->sync_cmd == 1) {
+ if (cmd->scmd) {
+ printk(KERN_NOTICE "megaraid_sas: unexpected"
+ "cmd attached to internal command!\n");
+ }
+ printk(KERN_NOTICE "megasas: %p synchronous cmd"
+ "on the internal reset queue,"
+ "issue it again.\n", cmd);
+ cmd->cmd_status = ENODATA;
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr,
+ 0, instance->reg_set);
+ } else if (cmd->scmd) {
+ printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
+ "detected on the internal queue, issue again.\n",
+ cmd, cmd->scmd->cmnd[0]);
+
+ atomic_inc(&instance->fw_outstanding);
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr,
+ cmd->frame_count-1, instance->reg_set);
+ } else {
+ printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
+ "internal reset defer list while re-issue!!\n",
+ cmd);
+ }
+ }
+
+ if (instance->aen_cmd) {
+ printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
+ megasas_return_cmd(instance, instance->aen_cmd);
+
+ instance->aen_cmd = NULL;
+ }
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ seq_num = instance->last_seq_num;
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ megasas_register_aen(instance, seq_num, class_locale.word);
+}
+
+/**
+ * megasas_internal_reset_defer_cmds - Move pending cmds to a deferred queue
+ * @instance: Adapter soft state
+ *
+ * We move the commands pending at internal reset time to a deferred
+ * queue. This queue is flushed after the internal reset sequence
+ * completes successfully. If the internal reset does not complete in
+ * time, the kernel reset handler flushes these commands.
+ */
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ int i;
+ u32 max_cmd = instance->max_fw_cmds;
+ u32 defer_index;
+ unsigned long flags;
+
+ defer_index = 0;
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ if (cmd->sync_cmd == 1 || cmd->scmd) {
+ printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
+ "on the defer queue as internal\n",
+ defer_index, cmd, cmd->sync_cmd, cmd->scmd);
+
+ if (!list_empty(&cmd->list)) {
+ printk(KERN_NOTICE "megaraid_sas: ERROR while"
+ " moving this cmd:%p, %d %p, it was"
+ "discovered on some list?\n",
+ cmd, cmd->sync_cmd, cmd->scmd);
+
+ list_del_init(&cmd->list);
+ }
+ defer_index++;
+ list_add_tail(&cmd->list,
+ &instance->internal_reset_pending_q);
+ }
+ }
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
+static void
+process_fw_state_change_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, work_init);
+ u32 wait;
+ unsigned long flags;
+
+ if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
+ printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n",
+ instance->adprecovery);
+ return ;
+ }
+
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
+ "state, restarting it...\n");
+
+ instance->instancet->disable_intr(instance);
+ atomic_set(&instance->fw_outstanding, 0);
+
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset(instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+ printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
+ "initiating next stage...\n");
+
+ printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
+ "state 2 starting...\n");
+
+ /* wait about 30 seconds before starting the second init */
+ for (wait = 0; wait < 30; wait++) {
+ msleep(1000);
+ }
+
+ if (megasas_transition_to_ready(instance, 1)) {
+ printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
+
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ megaraid_sas_kill_hba(instance);
+ return;
+ }
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
+ ) {
+ *instance->consumer = *instance->producer;
+ } else {
+ *instance->consumer = 0;
+ *instance->producer = 0;
+ }
+
+ megasas_issue_init_mfi(instance);
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ instance->instancet->enable_intr(instance);
+
+ megasas_issue_pending_cmds_again(instance);
+ instance->issuepend_done = 1;
+ }
+ return;
+}
+
+/**
+ * megasas_deplete_reply_queue - Processes all completed commands
+ * @instance: Adapter soft state
+ * @alt_status: Alternate status to be returned to
+ * SCSI mid-layer instead of the status
+ * returned by the FW
+ * Note: this must be called with hba lock held
+ */
+static int
+megasas_deplete_reply_queue(struct megasas_instance *instance,
+ u8 alt_status)
+{
+ u32 mfiStatus;
+ u32 fw_state;
+
+ if ((mfiStatus = instance->instancet->check_reset(instance,
+ instance->reg_set)) == 1) {
+ return IRQ_HANDLED;
+ }
+
+ if ((mfiStatus = instance->instancet->clear_intr(
+ instance->reg_set)
+ ) == 0) {
+ /* Hardware may not set outbound_intr_status in MSI-X mode */
+ if (!instance->msix_vectors)
+ return IRQ_NONE;
+ }
+
+ instance->mfiStatus = mfiStatus;
+
+ if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+
+ if (fw_state != MFI_STATE_FAULT) {
+ printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
+ fw_state);
+ }
+
+ if ((fw_state == MFI_STATE_FAULT) &&
+ (instance->disableOnlineCtrlReset == 0)) {
+ printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
+
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+
+ *instance->consumer =
+ cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+ }
+
+ instance->instancet->disable_intr(instance);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->issuepend_done = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+ megasas_internal_reset_defer_cmds(instance);
+
+ printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
+ fw_state, instance->adprecovery);
+
+ schedule_work(&instance->work_init);
+ return IRQ_HANDLED;
+
+ } else {
+ printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
+ fw_state, instance->disableOnlineCtrlReset);
+ }
+ }
+
+ tasklet_schedule(&instance->isr_tasklet);
+ return IRQ_HANDLED;
+}
+/**
+ * megasas_isr - isr entry point
+ */
+static irqreturn_t megasas_isr(int irq, void *devp)
+{
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
+ unsigned long flags;
+ irqreturn_t rc;
+
+ if (atomic_read(&instance->fw_reset_no_pci_access))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ rc = megasas_deplete_reply_queue(instance, DID_OK);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ return rc;
+}
+
+/**
+ * megasas_transition_to_ready - Move the FW to READY state
+ * @instance: Adapter soft state
+ *
+ * During initialization, the FW can be in any one of several possible
+ * states. If the FW is in an operational or waiting-for-handshake state,
+ * the driver must take steps to bring it to the ready state. Otherwise,
+ * it simply waits for the FW to reach the ready state.
+ */
+int
+megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
+{
+ int i;
+ u8 max_wait;
+ u32 fw_state;
+ u32 cur_state;
+ u32 abs_state, curr_abs_state;
+
+ abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+ fw_state = abs_state & MFI_STATE_MASK;
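+ /*
+ * fw_state is only the state field; abs_state keeps the whole
+ * status register so that any register change ends the wait below.
+ */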
+
+ if (fw_state != MFI_STATE_READY)
+ printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+ " state\n");
+
+ while (fw_state != MFI_STATE_READY) {
+
+ switch (fw_state) {
+
+ case MFI_STATE_FAULT:
+ printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+ if (ocr) {
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
+ } else
+ return -ENODEV;
+
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /*
+ * Set the CLR bit in inbound doorbell
+ */
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ writel(
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+ &instance->reg_set->doorbell);
+ } else {
+ writel(
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+ &instance->reg_set->inbound_doorbell);
+ }
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+
+ case MFI_STATE_BOOT_MESSAGE_PENDING:
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_INIT_HOTPLUG,
+ &instance->reg_set->doorbell);
+ } else
+ writel(MFI_INIT_HOTPLUG,
+ &instance->reg_set->inbound_doorbell);
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ break;
+
+ case MFI_STATE_OPERATIONAL:
+ /*
+ * Bring it to READY state; assuming max wait 10 secs
+ */
+ instance->instancet->disable_intr(instance);
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_RESET_FLAGS,
+ &instance->reg_set->doorbell);
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ for (i = 0; i < (10 * 1000); i += 20) {
+ if (readl(
+ &instance->
+ reg_set->
+ doorbell) & 1)
+ msleep(20);
+ else
+ break;
+ }
+ }
+ } else
+ writel(MFI_RESET_FLAGS,
+ &instance->reg_set->inbound_doorbell);
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+
+ case MFI_STATE_UNDEFINED:
+ /*
+ * This state should not last for more than 2 seconds
+ */
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+
+ case MFI_STATE_BB_INIT:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT_2:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FW_INIT_2;
+ break;
+
+ case MFI_STATE_DEVICE_SCAN:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+
+ case MFI_STATE_FLUSH_CACHE:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FLUSH_CACHE;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+ fw_state);
+ return -ENODEV;
+ }
+
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+ for (i = 0; i < (max_wait * 1000); i++) {
+ curr_abs_state = instance->instancet->
+ read_fw_status_reg(instance->reg_set);
+
+ if (abs_state == curr_abs_state) {
+ msleep(1);
+ } else
+ break;
+ }
+
+ /*
+ * Return error if fw_state hasn't changed after max_wait
+ */
+ if (curr_abs_state == abs_state) {
+ printk(KERN_DEBUG "FW state [%d] hasn't changed "
+ "in %d secs\n", fw_state, max_wait);
+ return -ENODEV;
+ }
+
+ abs_state = curr_abs_state;
+ fw_state = curr_abs_state & MFI_STATE_MASK;
+ }
+ printk(KERN_INFO "megasas: FW now in Ready state\n");
+
+ return 0;
+}
+
+/**
+ * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
+ * @instance: Adapter soft state
+ */
+static void megasas_teardown_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd = instance->max_mfi_cmds;
+ struct megasas_cmd *cmd;
+
+ if (!instance->frame_dma_pool)
+ return;
+
+ /*
+ * Return all frames to pool
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ if (cmd->frame)
+ pci_pool_free(instance->frame_dma_pool, cmd->frame,
+ cmd->frame_phys_addr);
+
+ if (cmd->sense)
+ pci_pool_free(instance->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+
+ /*
+ * Now destroy the pool itself
+ */
+ pci_pool_destroy(instance->frame_dma_pool);
+ pci_pool_destroy(instance->sense_dma_pool);
+
+ instance->frame_dma_pool = NULL;
+ instance->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_create_frame_pool - Creates DMA pool for cmd frames
+ * @instance: Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling MFI frame and the SG list that immediately follows the frame. This
+ * function creates those DMA memory buffers for each command packet by using
+ * PCI pool facility.
+ */
+static int megasas_create_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd;
+ u32 sge_sz;
+ u32 total_sz;
+ u32 frame_count;
+ struct megasas_cmd *cmd;
+
+ max_cmd = instance->max_mfi_cmds;
+
+ /*
+ * Size of our frame is 64 bytes for MFI frame, followed by max SG
+ * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
+ */
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (instance->flag_ieee) {
+ sge_sz = sizeof(struct megasas_sge_skinny);
+ }
+
+ /*
+ * For MFI controllers:
+ * max_num_sge = 60
+ * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
+ * Total 960 bytes (15 MFI frames of 64 bytes)
+ *
+ * Fusion adapters require only 3 extra frames:
+ * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+ * max_sge_sz = 12 bytes (sizeof megasas_sge64)
+ * Total 192 bytes (3 MFI frames of 64 bytes)
+ */
+ frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ total_sz = MEGAMFI_FRAME_SIZE * frame_count;
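+ /*
+ * 64-byte frames: 16 * 64 = 1024 bytes per command on MFI
+ * controllers, 4 * 64 = 256 bytes on fusion adapters.
+ */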
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+ instance->frame_dma_pool = pci_pool_create("megasas frame pool",
+ instance->pdev, total_sz, 256, 0);
+
+ if (!instance->frame_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+ return -ENOMEM;
+ }
+
+ instance->sense_dma_pool = pci_pool_create("megasas sense pool",
+ instance->pdev, 128, 4, 0);
+
+ if (!instance->sense_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+
+ pci_pool_destroy(instance->frame_dma_pool);
+ instance->frame_dma_pool = NULL;
+
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate and attach a frame to each of the commands in cmd_list.
+ * By making cmd->index as the context instead of the &cmd, we can
+ * always use 32bit context regardless of the architecture
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
+ GFP_KERNEL, &cmd->frame_phys_addr);
+
+ cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+
+ /*
+ * megasas_teardown_frame_pool() takes care of freeing
+ * whatever has been allocated
+ */
+ if (!cmd->frame || !cmd->sense) {
+ printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
+ megasas_teardown_frame_pool(instance);
+ return -ENOMEM;
+ }
+
+ memset(cmd->frame, 0, total_sz);
+ cmd->frame->io.context = cpu_to_le32(cmd->index);
+ cmd->frame->io.pad_0 = 0;
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+ }
+
+ return 0;
+}
+
+/**
+ * megasas_free_cmds - Free all the cmds in the free cmd pool
+ * @instance: Adapter soft state
+ */
+void megasas_free_cmds(struct megasas_instance *instance)
+{
+ int i;
+ /* First free the MFI frame pool */
+ megasas_teardown_frame_pool(instance);
+
+ /* Free all the commands in the cmd_list */
+ for (i = 0; i < instance->max_mfi_cmds; i++)
+
+ kfree(instance->cmd_list[i]);
+
+ /* Free the cmd_list buffer itself */
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&instance->cmd_pool);
+}
+
+/**
+ * megasas_alloc_cmds - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ * Each command that is issued to the FW, whether IO commands from the OS or
+ * internal commands like IOCTLs, are wrapped in local data structure called
+ * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
+ * the FW.
+ *
+ * Each frame has a 32-bit field called context (tag). This context is used
+ * to get back the megasas_cmd from the frame when a frame gets completed in
+ * the ISR. Typically the address of the megasas_cmd itself would be used as
+ * the context. But we wanted to keep the differences between 32 and 64 bit
+ * systems to the minimum. We always use 32 bit integers for the context. In
+ * this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd given the context. The
+ * free commands themselves are maintained in a linked list called cmd_pool.
+ */
+int megasas_alloc_cmds(struct megasas_instance *instance)
+{
+ int i;
+ int j;
+ u32 max_cmd;
+ struct megasas_cmd *cmd;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_mfi_cmds;
+
+ /*
+ * instance->cmd_list is an array of struct megasas_cmd pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
+
+ if (!instance->cmd_list) {
+ printk(KERN_DEBUG "megasas: out of memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
+ GFP_KERNEL);
+
+ if (!instance->cmd_list[i]) {
+
+ for (j = 0; j < i; j++)
+ kfree(instance->cmd_list[j]);
+
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ memset(cmd, 0, sizeof(struct megasas_cmd));
+ cmd->index = i;
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
+ cmd->scmd = NULL;
+ cmd->instance = instance;
+
+ list_add_tail(&cmd->list, &instance->cmd_pool);
+ }
+
+ /*
+ * Create a frame pool and assign one frame to each cmd
+ */
+ if (megasas_create_frame_pool(instance)) {
+ printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ megasas_free_cmds(instance);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * megasas_get_pd_list - Returns FW's pd_list structure
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out the
+ * SYSTEM physical drives exposed by the FW.
+ */
+static int
+megasas_get_pd_list(struct megasas_instance *instance)
+{
+ int ret = 0, pd_index = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_PD_LIST *ci;
+ struct MR_PD_ADDRESS *pd_addr;
+ dma_addr_t ci_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
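+ /* mbox byte 0 selects the query type: only PDs exposed to the host */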
+ dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+ dcmd->mbox.b[1] = 0;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ /*
+ * Copy the PD list returned by the FW into the local and
+ * instance PD lists.
+ */
+
+ pd_addr = ci->addr;
+
+ if (ret == 0 &&
+ (le32_to_cpu(ci->count) <
+ (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+
+ memset(instance->local_pd_list, 0,
+ MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+
+ for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
+
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
+ le16_to_cpu(pd_addr->deviceId);
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
+ pd_addr->scsiDevType;
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
+ MR_PD_STATE_SYSTEM;
+ pd_addr++;
+ }
+ memcpy(instance->pd_list, instance->local_pd_list,
+ sizeof(instance->pd_list));
+ }
+
+ pci_free_consistent(instance->pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ ci, ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
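+
+/*
+ * Worked example for the parse loop above (hypothetical values): if the FW
+ * returns ci->count = 2 with pd_addr[0].deviceId = 5 and
+ * pd_addr[1].deviceId = 9, then local_pd_list[5] and local_pd_list[9] are
+ * filled in (tid, driveType, MR_PD_STATE_SYSTEM) and every other slot stays
+ * zeroed - i.e. the table is indexed by device id, not packed by position.
+ */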
+
+/*
+ * megasas_get_ld_list - Returns FW's ld_list structure
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller LD
+ * list structure. This information is mainly used to find out the
+ * logical drives (LDs) exported by the FW.
+ */
+static int
+megasas_get_ld_list(struct megasas_instance *instance)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 ld_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ &ci_h);
+
+ if (!ci) {
+		printk(KERN_DEBUG "megasas: Failed to alloc mem in get_ld_list\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[0] = 1;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->pad_0 = 0;
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ ld_count = le32_to_cpu(ci->ldCount);
+
+	/* Parse the reply below to populate the instance's LD id table */
+
+ if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
+ memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
+
+ for (ld_index = 0; ld_index < ld_count; ld_index++) {
+ if (ci->ldList[ld_index].state != 0) {
+ ids = ci->ldList[ld_index].ref.targetId;
+ instance->ld_ids[ids] =
+ ci->ldList[ld_index].ref.targetId;
+ }
+ }
+ }
+
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ ci,
+ ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/**
+ * megasas_ld_list_query - Returns FW's ld_list structure
+ * @instance: Adapter soft state
+ * @query_type: Query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
+ *
+ * Issues an internal command (DCMD) to query the FW's LD target id
+ * list. This information is mainly used to find out the logical
+ * drives (LDs) exported by the FW.
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_TARGETID_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 tgtid_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_WARNING
+ "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_WARNING
+ "megasas: Failed to alloc mem for ld_list_query\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = query_type;
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[2] = 1;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->pad_0 = 0;
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ tgtid_count = le32_to_cpu(ci->count);
+
+ if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+ ids = ci->targetId[ld_index];
+ instance->ld_ids[ids] = ci->targetId[ld_index];
+ }
+	}
+
+ pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ ci, ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
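+
+/*
+ * Note (illustration): megasas_ld_list_query() and megasas_get_ld_list()
+ * populate the same instance->ld_ids table; e.g. a reply listing target
+ * ids 0 and 4 leaves ld_ids[0] = 0, ld_ids[4] = 4 and every other entry
+ * at 0xff, so a later lookup can test "ld_ids[tid] != 0xff" to decide
+ * whether a target id is a configured LD.
+ */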
+
+/*
+ * megasas_update_ext_vd_details - Update details w.r.t. extended VDs
+ * @instance: Controller's soft state
+ */
+static void megasas_update_ext_vd_details(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ u32 old_map_sz;
+ u32 new_map_sz;
+
+ fusion = instance->ctrl_context;
+	/* MFI based (non-fusion) controllers have nothing to update here */
+ if (!fusion)
+ return;
+
+ instance->supportmax256vd =
+ instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+	/* Below is an additional check to address future FW enhancements */
+ if (instance->ctrl_info->max_lds > 64)
+ instance->supportmax256vd = 1;
+
+ instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ if (instance->supportmax256vd) {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ } else {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ }
+ dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n",
+ instance->fw_supported_vd_count,
+ instance->fw_supported_pd_count);
+ dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n",
+ instance->drv_supported_vd_count,
+ instance->drv_supported_pd_count);
+
+ old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->fw_supported_vd_count - 1));
+ new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
+ fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->drv_supported_vd_count - 1));
+
+ fusion->max_map_sz = max(old_map_sz, new_map_sz);
+
+ if (instance->supportmax256vd)
+ fusion->current_map_sz = new_map_sz;
+ else
+ fusion->current_map_sz = old_map_sz;
+
+}
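+
+/*
+ * Sizing sketch for the map computation above (illustrative numbers only):
+ * with MAX_LOGICAL_DRIVES = 64, old_map_sz covers 64 MR_LD_SPAN_MAP entries
+ * (the first one is embedded in MR_FW_RAID_MAP, hence "count - 1"), while
+ * new_map_sz is the fixed-size MR_FW_RAID_MAP_EXT. max_map_sz takes the
+ * larger of the two so one buffer works for both firmware layouts, and
+ * current_map_sz picks whichever layout this FW actually reports.
+ */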
+
+/**
+ * megasas_get_ctrl_info - Returns FW's controller structure
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ */
+int
+megasas_get_ctrl_info(struct megasas_instance *instance)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_ctrl_info *ci;
+ struct megasas_ctrl_info *ctrl_info;
+ dma_addr_t ci_h = 0;
+
+ ctrl_info = instance->ctrl_info;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_ctrl_info), &ci_h);
+
+ if (!ci) {
+		printk(KERN_DEBUG "megasas: Failed to alloc mem for ctrl info\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->mbox.b[0] = 1;
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (!ret) {
+ memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+ megasas_update_ext_vd_details(instance);
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
+ ci, ci_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/*
+ * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
+ *				   to the firmware
+ * @instance: Adapter soft state
+ * @crash_buf_state: Tell FW to turn the crash dump feature ON/OFF
+ *		     (MR_CRASH_BUF_TURN_OFF = 0, MR_CRASH_BUF_TURN_ON = 1)
+ *
+ * Issues an internal command (DCMD) to set parameters for the crash dump
+ * feature. The driver sends the address of the crash dump DMA buffer and
+ * sets the mbox to tell FW that the driver supports the crash dump feature.
+ * This DCMD is sent only if the crash dump feature is supported by the FW.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd->mbox.b[0] = crash_buf_state;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/**
+ * megasas_issue_init_mfi - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * Issues the INIT MFI cmd
+ */
+static int
+megasas_issue_init_mfi(struct megasas_instance *instance)
+{
+ u32 context;
+
+ struct megasas_cmd *cmd;
+
+ struct megasas_init_frame *init_frame;
+ struct megasas_init_queue_info *initq_info;
+ dma_addr_t init_frame_h;
+ dma_addr_t initq_info_h;
+
+	/*
+	 * Prepare an init frame. Note that the init frame points to the
+	 * queue info structure. Each frame has an SGL allocated after the
+	 * first 64 bytes. For this frame - since we don't need any SGL -
+	 * we use the SGL's space as the queue info structure.
+	 *
+	 * We will not get a NULL command below. We just created the pool.
+	 */
+ cmd = megasas_get_cmd(instance);
+
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ initq_info = (struct megasas_init_queue_info *)
+ ((unsigned long)init_frame + 64);
+
+ init_frame_h = cmd->frame_phys_addr;
+ initq_info_h = init_frame_h + 64;
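+
+	/*
+	 * Frame layout at this point (illustration only):
+	 *
+	 *	frame_phys_addr +0  : megasas_init_frame (first 64 bytes)
+	 *	frame_phys_addr +64 : megasas_init_queue_info (reuses the
+	 *			      space normally reserved for the SGL)
+	 */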
+
+ context = init_frame->context;
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+ init_frame->context = context;
+
+ initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+ initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
+
+ initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+ initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(initq_info_h));
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(initq_info_h));
+
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
+
+ /*
+ * disable the intr before firing the init frame to FW
+ */
+ instance->instancet->disable_intr(instance);
+
+ /*
+ * Issue the init frame in polled mode
+ */
+
+ if (megasas_issue_polled(instance, cmd)) {
+ printk(KERN_ERR "megasas: Failed to init firmware\n");
+ megasas_return_cmd(instance, cmd);
+ goto fail_fw_init;
+ }
+
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+
+fail_fw_init:
+ return -EINVAL;
+}
+
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *reg_set;
+ u32 context_sz;
+ u32 reply_q_sz;
+
+ reg_set = instance->reg_set;
+
+ /*
+ * Get various operational parameters from status register
+ */
+ instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+ instance->max_fw_cmds = instance->max_fw_cmds-1;
+ instance->max_mfi_cmds = instance->max_fw_cmds;
+ instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
+ 0x10;
+ /*
+ * Create a pool of commands
+ */
+ if (megasas_alloc_cmds(instance))
+ goto fail_alloc_cmds;
+
+	/*
+	 * Allocate memory for the reply queue. The length of the reply
+	 * queue should be _one_ more than the maximum commands handled by
+	 * the firmware.
+	 *
+	 * Note: When FW completes commands, it places corresponding context
+	 * values in this circular reply queue. This circular queue is a
+	 * fairly typical producer-consumer queue. FW is the producer (of
+	 * completed commands) and the driver is the consumer.
+	 */
+ context_sz = sizeof(u32);
+ reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
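+
+	/*
+	 * Worked example (hypothetical count): if the FW reports max
+	 * cmds = 1008, max_fw_cmds becomes 1007 after the decrement above,
+	 * and reply_q_sz = 4 * (1007 + 1) = 4032 bytes - one u32 context
+	 * slot per outstanding command plus the extra slot that keeps the
+	 * producer and consumer indices from aliasing on a full queue.
+	 */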
+
+ instance->reply_queue = pci_alloc_consistent(instance->pdev,
+ reply_q_sz,
+ &instance->reply_queue_h);
+
+ if (!instance->reply_queue) {
+ printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+ goto fail_reply_queue;
+ }
+
+ if (megasas_issue_init_mfi(instance))
+ goto fail_fw_init;
+
+ if (megasas_get_ctrl_info(instance)) {
+		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
+ "Fail from %s %d\n", instance->unique_id,
+ __func__, __LINE__);
+ goto fail_fw_init;
+ }
+
+ instance->fw_support_ieee =
+ (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x04000000);
+
+	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d\n",
+ instance->fw_support_ieee);
+
+ if (instance->fw_support_ieee)
+ instance->flag_ieee = 1;
+
+ return 0;
+
+fail_fw_init:
+
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+fail_reply_queue:
+ megasas_free_cmds(instance);
+
+fail_alloc_cmds:
+ return 1;
+}
+
+/**
+ * megasas_init_fw - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * This is the main function for initializing firmware
+ */
+
+static int megasas_init_fw(struct megasas_instance *instance)
+{
+ u32 max_sectors_1;
+ u32 max_sectors_2;
+ u32 tmp_sectors, msix_enable, scratch_pad_2;
+ resource_size_t base_addr;
+ struct megasas_register_set __iomem *reg_set;
+ struct megasas_ctrl_info *ctrl_info = NULL;
+ unsigned long bar_list;
+ int i, loop, fw_msix_count = 0;
+ struct IOV_111 *iovPtr;
+
+ /* Find first memory bar */
+ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
+ instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+ if (pci_request_selected_regions(instance->pdev, instance->bar,
+ "megasas: LSI")) {
+ printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ return -EBUSY;
+ }
+
+ base_addr = pci_resource_start(instance->pdev, instance->bar);
+ instance->reg_set = ioremap_nocache(base_addr, 8192);
+
+ if (!instance->reg_set) {
+ printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+ goto fail_ioremap;
+ }
+
+ reg_set = instance->reg_set;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ instance->instancet = &megasas_instance_template_fusion;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1078R:
+ case PCI_DEVICE_ID_LSI_SAS1078DE:
+ instance->instancet = &megasas_instance_template_ppc;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+ case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+ instance->instancet = &megasas_instance_template_gen2;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+ case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+ instance->instancet = &megasas_instance_template_skinny;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1064R:
+ case PCI_DEVICE_ID_DELL_PERC5:
+ default:
+ instance->instancet = &megasas_instance_template_xscale;
+ break;
+ }
+
+ if (megasas_transition_to_ready(instance, 0)) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "megasas: FW restarted successfully from %s!\n",
+ __func__);
+
+		/* wait for about 30 seconds before retry */
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
+
+	/*
+	 * MSI-X host index 0 is common for all adapters.
+	 * It is used for all MPT based adapters.
+	 */
+ instance->reply_post_host_index_addr[0] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x4000000) >> 0x1a;
+ if (msix_enable && !msix_disable) {
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
+ /* Check max MSI-X vectors */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
+ instance->msix_vectors = (scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
+ fw_msix_count = instance->msix_vectors;
+ if (msix_vectors)
+ instance->msix_vectors =
+ min(msix_vectors,
+ instance->msix_vectors);
+ } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)
+ || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ /* Invader/Fury supports more than 8 MSI-X */
+ instance->msix_vectors = ((scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
+ >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+ fw_msix_count = instance->msix_vectors;
+ /* Save 1-15 reply post index address to local memory
+ * Index 0 is already saved from reg offset
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET
+ */
+ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
+ instance->reply_post_host_index_addr[loop] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
+ + (loop * 0x10));
+ }
+ if (msix_vectors)
+ instance->msix_vectors = min(msix_vectors,
+ instance->msix_vectors);
+ } else
+ instance->msix_vectors = 1;
+ /* Don't bother allocating more MSI-X vectors than cpus */
+ instance->msix_vectors = min(instance->msix_vectors,
+ (unsigned int)num_online_cpus());
+ for (i = 0; i < instance->msix_vectors; i++)
+ instance->msixentry[i].entry = i;
+ i = pci_enable_msix_range(instance->pdev, instance->msixentry,
+ 1, instance->msix_vectors);
+ if (i > 0)
+ instance->msix_vectors = i;
+ else
+ instance->msix_vectors = 0;
+
+		dev_info(&instance->pdev->dev, "[scsi%d]: FW supports "
+			"<%d> MSI-X vectors, Online CPUs: <%d>, "
+			"Current MSI-X <%d>\n", instance->host->host_no,
+			fw_msix_count, (unsigned int)num_online_cpus(),
+			instance->msix_vectors);
+ }
+
+ instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
+ GFP_KERNEL);
+ if (instance->ctrl_info == NULL)
+ goto fail_init_adapter;
+
+	/*
+	 * Below are the default values for legacy firmware
+	 * (non-fusion based controllers).
+	 */
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ /* Get operational params, sge flags, send init cmd to controller */
+ if (instance->instancet->init_adapter(instance))
+ goto fail_init_adapter;
+
+	printk(KERN_INFO "megasas: INIT adapter done\n");
+
+	/* Get the PD list: used to find the system PDs for passthrough */
+	memset(instance->pd_list, 0,
+		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+ if (megasas_get_pd_list(instance) < 0) {
+ printk(KERN_ERR "megasas: failed to get PD list\n");
+ goto fail_init_adapter;
+ }
+
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+
+	/*
+	 * Compute the max allowed sectors per IO: The controller info has two
+	 * limits on max sectors. The driver should use the minimum of these
+	 * two.
+	 *
+	 * 1 << stripe_sz_ops.min = max sectors per strip
+	 *
+	 * Note that older firmware (< FW ver 30) didn't report the
+	 * information needed to calculate max_sectors_1, so the number
+	 * always ended up as zero.
+	 */
+ tmp_sectors = 0;
+ ctrl_info = instance->ctrl_info;
+
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ le16_to_cpu(ctrl_info->max_strips_per_io);
+ max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
+
+	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
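+
+	/*
+	 * Worked example (hypothetical values): stripe_sz_ops.min = 7 gives
+	 * 1 << 7 = 128 sectors per strip; with max_strips_per_io = 42 that
+	 * yields max_sectors_1 = 5376. If the FW also reports
+	 * max_request_size = 4096, then tmp_sectors = min(5376, 4096) = 4096.
+	 */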
+
+	/* Check whether controller is iMR or MR */
+ if (ctrl_info->memory_size) {
+ instance->is_imr = 0;
+		dev_info(&instance->pdev->dev, "Controller type: MR, "
+ "Memory size is: %dMB\n",
+ le16_to_cpu(ctrl_info->memory_size));
+ } else {
+ instance->is_imr = 1;
+ dev_info(&instance->pdev->dev,
+ "Controller type: iMR\n");
+ }
+ instance->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ instance->mpio = ctrl_info->adapterOperations2.mpio;
+ instance->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if (instance->UnevenSpanSupport) {
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ dev_info(&instance->pdev->dev, "FW supports: "
+ "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+
+ }
+ if (ctrl_info->host_interface.SRIOV) {
+ if (!ctrl_info->adapterOperations2.activePassive)
+ instance->PlasmaFW111 = 1;
+
+ if (!instance->PlasmaFW111)
+ instance->requestorId =
+ ctrl_info->iov.requestorId;
+ else {
+ iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+ instance->requestorId = iovPtr->requestorId;
+ }
+ dev_warn(&instance->pdev->dev, "I am VF "
+ "requestorId %d\n", instance->requestorId);
+ }
+
+ instance->crash_dump_fw_support =
+ ctrl_info->adapterOperations3.supportCrashDump;
+ instance->crash_dump_drv_support =
+ (instance->crash_dump_fw_support &&
+ instance->crash_dump_buf);
+ if (instance->crash_dump_drv_support) {
+ dev_info(&instance->pdev->dev, "Firmware Crash dump "
+ "feature is supported\n");
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+
+ } else {
+ if (instance->crash_dump_buf)
+ pci_free_consistent(instance->pdev,
+ CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+ instance->crash_dump_buf = NULL;
+ }
+
+ instance->secure_jbod_support =
+ ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ if (instance->secure_jbod_support)
+ dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n");
+ instance->max_sectors_per_req = instance->max_num_sge *
+ PAGE_SIZE / 512;
+ if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+ instance->max_sectors_per_req = tmp_sectors;
+
+	/*
+	 * 1. For fusion adapters, 3 commands are reserved for IOCTLs and 5
+	 *    commands for the driver's internal DCMDs.
+	 * 2. For MFI skinny adapters, 5 commands are reserved for IOCTLs +
+	 *    the driver's internal DCMDs.
+	 * 3. For the rest of the MFI adapters, 27 commands are reserved for
+	 *    IOCTLs and 5 commands for the driver's internal DCMDs.
+	 */
+ if (instance->ctrl_context) {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ MEGASAS_SKINNY_INT_CMDS;
+ sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
+ } else {
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ MEGASAS_INT_CMDS;
+ sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
+ }
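+
+	/*
+	 * Worked example (hypothetical counts): on a fusion adapter with
+	 * max_fw_cmds = 1008 and the 3 + 5 reserved commands listed above,
+	 * max_scsi_cmds = 1008 - 8 = 1000, and ioctl_sem allows at most 3
+	 * concurrent ioctl frames so application traffic cannot starve the
+	 * driver's internal DCMDs.
+	 */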
+
+ /* Check for valid throttlequeuedepth module parameter */
+ if (throttlequeuedepth &&
+ throttlequeuedepth <= instance->max_scsi_cmds)
+ instance->throttlequeuedepth = throttlequeuedepth;
+ else
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+
+ /*
+ * Setup tasklet for cmd completion
+ */
+
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+ (unsigned long)instance);
+
+ /* Launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 1))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ return 0;
+
+fail_init_adapter:
+fail_ready_state:
+ kfree(instance->ctrl_info);
+ instance->ctrl_info = NULL;
+ iounmap(instance->reg_set);
+
+ fail_ioremap:
+ pci_release_selected_regions(instance->pdev, instance->bar);
+
+ return -EINVAL;
+}
+
+/**
+ * megasas_release_mfi - Reverses the FW initialization
+ * @intance: Adapter soft state
+ */
+static void megasas_release_mfi(struct megasas_instance *instance)
+{
+	u32 reply_q_sz = sizeof(u32) * (instance->max_mfi_cmds + 1);
+
+ if (instance->reply_queue)
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+
+ megasas_free_cmds(instance);
+
+ iounmap(instance->reg_set);
+
+ pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_get_seq_num - Gets latest event sequence numbers
+ * @instance: Adapter soft state
+ * @eli: FW event log sequence numbers information
+ *
+ * FW maintains a log of all events in a non-volatile area. Upper layers
+ * would usually find out the latest sequence number of the events, the seq
+ * number at boot etc. They would "read" all the events below the latest
+ * seq number by issuing a direct fw cmd (DCMD). For future events (beyond
+ * the latest seq number), they would subscribe to AEN (asynchronous event
+ * notification) and wait for the events to happen.
+ */
+static int
+megasas_get_seq_num(struct megasas_instance *instance,
+ struct megasas_evt_log_info *eli)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_evt_log_info *el_info;
+ dma_addr_t el_info_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return -ENOMEM;
+
+ dcmd = &cmd->frame->dcmd;
+ el_info = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h);
+
+ if (!el_info) {
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(el_info, 0, sizeof(*el_info));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out "
+			"from %s\n", __func__);
+ else {
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+ eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+ eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+ eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+ eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+}
+
+/**
+ * megasas_register_aen - Registers for asynchronous event notification
+ * @instance: Adapter soft state
+ * @seq_num: The starting sequence number
+ * @class_locale_word: Class/locale of the events to subscribe to
+ *
+ * This function subscribes for AEN for events beyond the @seq_num. It
+ * requests to be notified if and only if the event is of type
+ * @class_locale_word.
+ */
+static int
+megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+ u32 class_locale_word)
+{
+ int ret_val;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ union megasas_evt_class_locale curr_aen;
+ union megasas_evt_class_locale prev_aen;
+
+	/*
+	 * If there is an AEN pending already (aen_cmd), check if the
+	 * class_locale of that pending AEN is inclusive of the new
+	 * AEN request we currently have. If it is, then we don't have
+	 * to do anything. In other words, whichever events the current
+	 * AEN request is subscribing to have already been subscribed
+	 * to.
+	 *
+	 * If the old_cmd is _not_ inclusive, then we have to abort
+	 * that command, form a class_locale that is a superset of both
+	 * the old and the current, and re-issue to the FW.
+	 */
+
+ curr_aen.word = class_locale_word;
+
+ if (instance->aen_cmd) {
+
+ prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+ prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
+
+		/*
+		 * A class whose enum value is smaller is inclusive of all
+		 * higher values. If a PROGRESS (= -1) was previously
+		 * registered, then new registration requests for higher
+		 * classes need not be sent to FW. They are automatically
+		 * included.
+		 *
+		 * Locale numbers don't have such a hierarchy. They are
+		 * bitmap values.
+		 */
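+		/*
+		 * Example (hypothetical words): a pending AEN with
+		 * class = -1 (PROGRESS) and locale = 0xFFFF already covers a
+		 * new request for class = CRITICAL, locale = 0x0010, so we
+		 * return early. A pending locale of 0x0001 would not cover
+		 * 0x0010, so the merged registration below would use
+		 * locale = 0x0011 and the smaller (more inclusive) class.
+		 */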
+ if ((prev_aen.members.class <= curr_aen.members.class) &&
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
+ /*
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
+ return 0;
+ } else {
+ curr_aen.members.locale |= prev_aen.members.locale;
+
+ if (prev_aen.members.class < curr_aen.members.class)
+ curr_aen.members.class = prev_aen.members.class;
+
+ instance->aen_cmd->abort_aen = 1;
+			ret_val = megasas_issue_blocked_abort_cmd(instance,
+					instance->aen_cmd, 30);
+
+ if (ret_val) {
+ printk(KERN_DEBUG "megasas: Failed to abort "
+ "previous AEN command\n");
+ return ret_val;
+ }
+ }
+ }
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -ENOMEM;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
+
+ /*
+ * Prepare DCMD for aen registration
+ */
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+ dcmd->mbox.w[0] = cpu_to_le32(seq_num);
+ instance->last_seq_num = seq_num;
+ dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+ if (instance->aen_cmd != NULL) {
+ megasas_return_cmd(instance, cmd);
+ return 0;
+ }
+
+ /*
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
+ instance->aen_cmd = cmd;
+
+ /*
+ * Issue the aen registration frame
+ */
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ return 0;
+}
+
+/**
+ * megasas_start_aen - Subscribes to AEN during driver load time
+ * @instance: Adapter soft state
+ */
+static int megasas_start_aen(struct megasas_instance *instance)
+{
+ struct megasas_evt_log_info eli;
+ union megasas_evt_class_locale class_locale;
+
+ /*
+ * Get the latest sequence number from FW
+ */
+ memset(&eli, 0, sizeof(eli));
+
+ if (megasas_get_seq_num(instance, &eli))
+ return -1;
+
+ /*
+ * Register AEN with FW for latest sequence number plus 1
+ */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ return megasas_register_aen(instance,
+ eli.newest_seq_num + 1,
+ class_locale.word);
+}
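+
+/*
+ * For instance (hypothetical sequence numbers): if the FW reports
+ * newest_seq_num = 1000, the call above registers for event 1001 onward
+ * with locale MR_EVT_LOCALE_ALL and class MR_EVT_CLASS_DEBUG, i.e. every
+ * event the FW logs from now on; already-logged events are not replayed.
+ */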
+
+/**
+ * megasas_io_attach - Attaches this driver to SCSI mid-layer
+ * @instance: Adapter soft state
+ */
+static int megasas_io_attach(struct megasas_instance *instance)
+{
+ struct Scsi_Host *host = instance->host;
+
+ /*
+ * Export parameters required by SCSI mid-layer
+ */
+ host->irq = instance->pdev->irq;
+ host->unique_id = instance->unique_id;
+ host->can_queue = instance->max_scsi_cmds;
+ host->this_id = instance->init_id;
+ host->sg_tablesize = instance->max_num_sge;
+
+ if (instance->fw_support_ieee)
+ instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
+ /*
+ * Check if the module parameter value for max_sectors can be used
+ */
+ if (max_sectors && max_sectors < instance->max_sectors_per_req)
+ instance->max_sectors_per_req = max_sectors;
+ else {
+ if (max_sectors) {
+ if (((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+ (max_sectors <= MEGASAS_MAX_SECTORS)) {
+ instance->max_sectors_per_req = max_sectors;
+ } else {
+				printk(KERN_INFO "megasas: max_sectors should be > 0 "
+					"and <= %d (or < 1MB for GEN2 controllers)\n",
+					instance->max_sectors_per_req);
+ }
+ }
+ }
+
+ host->max_sectors = instance->max_sectors_per_req;
+ host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
+ host->max_channel = MEGASAS_MAX_CHANNELS - 1;
+ host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
+ host->max_lun = MEGASAS_MAX_LUN;
+ host->max_cmd_len = 16;
+
+ /* Fusion only supports host reset */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ host->hostt->eh_device_reset_handler = NULL;
+ host->hostt->eh_bus_reset_handler = NULL;
+ }
+
+ /*
+ * Notify the mid-layer about the new controller
+ */
+ if (scsi_add_host(host, &instance->pdev->dev)) {
+ printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+megasas_set_dma_mask(struct pci_dev *pdev)
+{
+	/*
+	 * All our controllers are capable of performing 64-bit DMA
+	 */
+ if (IS_DMA64) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ goto fail_set_dma_mask;
+ }
+ /*
+ * Ensure that all data structures are allocated in 32-bit
+ * memory.
+ */
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ /* Try 32bit DMA mask and 32 bit Consistent dma mask */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			dev_info(&pdev->dev, "set 32bit DMA mask "
+				"and 32 bit consistent mask\n");
+ else
+ goto fail_set_dma_mask;
+ }
+
+ return 0;
+
+fail_set_dma_mask:
+ return 1;
+}
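+
+/*
+ * Summary of the fallback ladder above (illustration): streaming DMA tries
+ * a 64-bit mask first (when IS_DMA64) and drops to 32-bit on failure, while
+ * the consistent (coherent) mask is always forced to 32-bit so that frames,
+ * queues and other control structures stay below 4GB, where every supported
+ * controller can reach them.
+ */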
+
+/**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rval, pos, i, j, cpu;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ u16 control = 0;
+ struct fusion_context *fusion = NULL;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
+
+ /*
+ * Announce PCI information
+ */
+ printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ printk("bus %d:slot %d:func %d\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+	/*
+	 * PCI prepping: enable device, set bus mastering and DMA mask
+	 */
+ rval = pci_enable_device_mem(pdev);
+
+	if (rval)
+		return rval;
+
+ pci_set_master(pdev);
+
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+	atomic_set(&instance->fw_reset_no_pci_access, 0);
+ instance->pdev = pdev;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ {
+ instance->ctrl_context_pages =
+ get_order(sizeof(struct fusion_context));
+ instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
+ instance->ctrl_context_pages);
+ if (!instance->ctrl_context) {
+ printk(KERN_DEBUG "megasas: Failed to allocate "
+ "memory for Fusion context info\n");
+ goto fail_alloc_dma_buf;
+ }
+ fusion = instance->ctrl_context;
+ memset(fusion, 0,
+ ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
+ INIT_LIST_HEAD(&fusion->cmd_pool);
+ spin_lock_init(&fusion->mpt_pool_lock);
+ }
+ break;
+ default: /* For all other supported controllers */
+
+ instance->producer =
+ pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer =
+ pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->consumer_h);
+
+ if (!instance->producer || !instance->consumer) {
+			printk(KERN_DEBUG "megasas: Failed to allocate "
+				"memory for producer, consumer\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ break;
+ }
+
+	/* Crash dump feature related initialisation */
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->crash_dump_fw_support = 0;
+ instance->crash_dump_app_support = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ spin_lock_init(&instance->crashdump_lock);
+ instance->crash_dump_buf = NULL;
+
+ if (!reset_devices)
+ instance->crash_dump_buf = pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
+ if (!instance->crash_dump_buf)
+ dev_err(&instance->pdev->dev, "Can't allocate Firmware "
+ "crash dump DMA buffer\n");
+
+ megasas_poll_wait_aen = 0;
+ instance->flag_ieee = 0;
+ instance->ev = NULL;
+ instance->issuepend_done = 1;
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ instance->is_imr = 0;
+
+ instance->evt_detail = pci_alloc_consistent(pdev,
+ sizeof(struct
+ megasas_evt_detail),
+ &instance->evt_detail_h);
+
+ if (!instance->evt_detail) {
+ printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+ "event detail structure\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ /*
+ * Initialize locks and queues
+ */
+ INIT_LIST_HEAD(&instance->cmd_pool);
+ INIT_LIST_HEAD(&instance->internal_reset_pending_q);
+
+	atomic_set(&instance->fw_outstanding, 0);
+
+ init_waitqueue_head(&instance->int_cmd_wait_q);
+ init_waitqueue_head(&instance->abort_cmd_wait_q);
+
+ spin_lock_init(&instance->mfi_pool_lock);
+ spin_lock_init(&instance->hba_lock);
+ spin_lock_init(&instance->completion_lock);
+
+ mutex_init(&instance->aen_mutex);
+ mutex_init(&instance->reset_mutex);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
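+	/* e.g. bus 0x02, devfn 0x10 gives unique_id 0x0210 (illustration) */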
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+ instance->ctrl_info = NULL;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
+ instance->flag_ieee = 1;
+
+ megasas_dbg_lvl = 0;
+ instance->flag = 0;
+ instance->unload = 1;
+ instance->last_time = 0;
+ instance->disableOnlineCtrlReset = 1;
+ instance->UnevenSpanSupport = 0;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
+ INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
+ } else
+ INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+
+ /*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_fw(instance))
+ goto fail_init_mfi;
+
+ if (instance->requestorId) {
+ if (instance->PlasmaFW111) {
+ instance->vf_affiliation_111 =
+ pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h);
+ if (!instance->vf_affiliation_111)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ } else {
+ instance->vf_affiliation =
+ pci_alloc_consistent(pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h);
+ if (!instance->vf_affiliation)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ }
+ }
+
+retry_irq_register:
+ /*
+ * Register IRQ
+ */
+ if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0; j < i; j++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ }
+ /* Retry irq register for IO_APIC */
+ instance->msix_vectors = 0;
+ goto retry_irq_register;
+ }
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev,
+ "Error setting affinity hint "
+ "for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+ }
+
+ instance->instancet->enable_intr(instance);
+
+ /*
+ * Store instance in PCI softstate
+ */
+ pci_set_drvdata(pdev, instance);
+
+ /*
+ * Add this controller to megasas_mgmt_info structure so that it
+ * can be exported to management applications
+ */
+ megasas_mgmt_info.count++;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
+ megasas_mgmt_info.max_index++;
+
+ /*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ instance->unload = 0;
+ /*
+ * Trigger SCSI to scan our drives
+ */
+ scsi_scan_host(host);
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance)) {
+ printk(KERN_DEBUG "megasas: start aen failed\n");
+ goto fail_start_aen;
+ }
+
+ /* Get current SR-IOV LD/VF affiliation */
+ if (instance->requestorId)
+ megasas_get_ld_vf_affiliation(instance, 1);
+
+ return 0;
+
+ fail_start_aen:
+ fail_io_attach:
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ megasas_mgmt_info.max_index--;
+
+ instance->instancet->disable_intr(instance);
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+fail_irq:
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ megasas_release_fusion(instance);
+ else
+ megasas_release_mfi(instance);
+ fail_init_mfi:
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+ fail_alloc_dma_buf:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+ fail_alloc_instance:
+ fail_set_dma_mask:
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+
+/**
+ * megasas_flush_cache - Requests FW to flush all its caches
+ * @instance: Adapter soft state
+ */
+static void megasas_flush_cache(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
+ dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+/**
+ * megasas_shutdown_controller - Instructs FW to shutdown the controller
+ * @instance: Adapter soft state
+ * @opcode: Shutdown/Hibernate
+ */
+static void megasas_shutdown_controller(struct megasas_instance *instance,
+ u32 opcode)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ if (instance->aen_cmd)
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->aen_cmd, 30);
+ if (instance->map_update_cmd)
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->map_update_cmd, 30);
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = cpu_to_le32(opcode);
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out "
+			"from %s\n", __func__);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * megasas_suspend - driver suspend entry point
+ * @pdev: PCI device structure
+ * @state: PCI power state to suspend routine
+ */
+static int
+megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ int i;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+ instance->unload = 1;
+
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
+
+	/* cancel the delayed work if this work is still queued */
+ if (instance->ev != NULL) {
+ struct megasas_aen_event *ev = instance->ev;
+ cancel_delayed_work_sync(&ev->hotplug_work);
+ instance->ev = NULL;
+ }
+
+ tasklet_kill(&instance->isr_tasklet);
+
+ pci_set_drvdata(instance->pdev, instance);
+ instance->instancet->disable_intr(instance);
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+/**
+ * megasas_resume - driver resume entry point
+ * @pdev: PCI device structure
+ */
+static int
+megasas_resume(struct pci_dev *pdev)
+{
+ int rval, i, j, cpu;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+	/*
+	 * PCI prepping: enable device, set bus mastering and DMA mask
+	 */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ printk(KERN_ERR "megasas: Enable device failed\n");
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
+
+ /*
+ * Initialize MFI Firmware
+ */
+
+ atomic_set(&instance->fw_outstanding, 0);
+
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+
+ /* Now re-enable MSI-X */
+ if (instance->msix_vectors &&
+ pci_enable_msix_exact(instance->pdev, instance->msixentry,
+ instance->msix_vectors))
+ goto fail_reenable_msix;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ {
+ megasas_reset_reply_desc(instance);
+ if (megasas_ioc_init_fusion(instance)) {
+ megasas_free_cmds(instance);
+ megasas_free_cmds_fusion(instance);
+ goto fail_init_mfi;
+ }
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+ }
+ break;
+ default:
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ if (megasas_issue_init_mfi(instance))
+ goto fail_init_mfi;
+ break;
+ }
+
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+ (unsigned long)instance);
+
+ /*
+ * Register IRQ
+ */
+ if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0 ; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0; j < i; j++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ }
+ goto fail_irq;
+ }
+
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error "
+ "setting affinity hint for cpu "
+ "%d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+ }
+
+ /* Re-launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ instance->instancet->enable_intr(instance);
+ instance->unload = 0;
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
+ return 0;
+
+fail_irq:
+fail_init_mfi:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+fail_set_dma_mask:
+fail_ready_state:
+fail_reenable_msix:
+
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+#else
+#define megasas_suspend NULL
+#define megasas_resume NULL
+#endif
+
+/**
+ * megasas_detach_one - PCI hot-unplug entry point
+ * @pdev: PCI device structure
+ */
+static void megasas_detach_one(struct pci_dev *pdev)
+{
+ int i;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ struct fusion_context *fusion;
+
+ instance = pci_get_drvdata(pdev);
+ instance->unload = 1;
+ host = instance->host;
+ fusion = instance->ctrl_context;
+
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
+ if (instance->fw_crash_state != UNAVAILABLE)
+ megasas_free_host_crash_buffer(instance);
+ scsi_remove_host(instance->host);
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+	/* cancel the delayed work if this work is still queued */
+ if (instance->ev != NULL) {
+ struct megasas_aen_event *ev = instance->ev;
+ cancel_delayed_work_sync(&ev->hotplug_work);
+ instance->ev = NULL;
+ }
+
+ /* cancel all wait events */
+ wake_up_all(&instance->int_cmd_wait_q);
+
+ tasklet_kill(&instance->isr_tasklet);
+
+	/*
+	 * Take the instance off the instance array. Note that we will not
+	 * decrement the max_index. We let this array be a sparse array.
+	 */
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ if (megasas_mgmt_info.instance[i] == instance) {
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[i] = NULL;
+
+ break;
+ }
+ }
+
+ instance->instancet->disable_intr(instance);
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ megasas_release_fusion(instance);
+		for (i = 0; i < 2; i++) {
+ if (fusion->ld_map[i])
+ dma_free_coherent(&instance->pdev->dev,
+ fusion->max_map_sz,
+ fusion->ld_map[i],
+ fusion->ld_map_phys[i]);
+ if (fusion->ld_drv_map[i])
+ free_pages((ulong)fusion->ld_drv_map[i],
+ fusion->drv_map_pages);
+ }
+ free_pages((ulong)instance->ctrl_context,
+ instance->ctrl_context_pages);
+ break;
+ default:
+ megasas_release_mfi(instance);
+ pci_free_consistent(pdev, sizeof(u32),
+ instance->producer,
+ instance->producer_h);
+ pci_free_consistent(pdev, sizeof(u32),
+ instance->consumer,
+ instance->consumer_h);
+ break;
+ }
+
+ kfree(instance->ctrl_info);
+
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail, instance->evt_detail_h);
+
+ if (instance->vf_affiliation)
+ pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ instance->vf_affiliation,
+ instance->vf_affiliation_h);
+
+ if (instance->vf_affiliation_111)
+ pci_free_consistent(pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ instance->vf_affiliation_111,
+ instance->vf_affiliation_111_h);
+
+ if (instance->hb_host_mem)
+ pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ instance->hb_host_mem,
+ instance->hb_host_mem_h);
+
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf, instance->crash_dump_h);
+
+ scsi_host_put(host);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+/**
+ * megasas_shutdown - Shutdown entry point
+ * @pdev:		PCI device structure
+ */
+static void megasas_shutdown(struct pci_dev *pdev)
+{
+ int i;
+ struct megasas_instance *instance = pci_get_drvdata(pdev);
+
+ instance->unload = 1;
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ instance->instancet->disable_intr(instance);
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+}
+
+/**
+ * megasas_mgmt_open - char node "open" entry point
+ */
+static int megasas_mgmt_open(struct inode *inode, struct file *filep)
+{
+ /*
+ * Allow only those users with admin rights
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return 0;
+}
+
+/**
+ * megasas_mgmt_fasync - Async notifier registration from applications
+ *
+ * This function adds the calling process to a driver global queue. When an
+ * event occurs, SIGIO will be sent to all processes in this queue.
+ */
+static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
+{
+ int rc;
+
+ mutex_lock(&megasas_async_queue_mutex);
+
+ rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
+
+ mutex_unlock(&megasas_async_queue_mutex);
+
+ if (rc >= 0) {
+		/* For a sanity check when we get the ioctl */
+ filep->private_data = filep;
+ return 0;
+ }
+
+ printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
+
+ return rc;
+}
+
+/**
+ * megasas_mgmt_poll - char node "poll" entry point
+ */
+static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask;
+ unsigned long flags;
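+	/* Register on the AEN wait queue; report readable only if an AEN fired */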
+ poll_wait(file, &megasas_poll_wait, wait);
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ if (megasas_poll_wait_aen)
+ mask = (POLLIN | POLLRDNORM);
+
+ else
+ mask = 0;
+ megasas_poll_wait_aen = 0;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ return mask;
+}
+
+/*
+ * megasas_set_crash_dump_params_ioctl:
+ * Send CRASH_DUMP_MODE DCMD to all controllers
+ * @cmd: MFI command frame
+ */
+
+static int megasas_set_crash_dump_params_ioctl(
+ struct megasas_cmd *cmd)
+{
+ struct megasas_instance *local_instance;
+ int i, error = 0;
+ int crash_support;
+
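+	/* The requested crash dump mode arrives in mailbox word 0 of the DCMD */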
+ crash_support = cmd->frame->dcmd.mbox.w[0];
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ local_instance = megasas_mgmt_info.instance[i];
+ if (local_instance && local_instance->crash_dump_drv_support) {
+ if ((local_instance->adprecovery ==
+ MEGASAS_HBA_OPERATIONAL) &&
+ !megasas_set_crash_dump_params(local_instance,
+ crash_support)) {
+ local_instance->crash_dump_app_support =
+ crash_support;
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+					"dump mode set successfully\n");
+ error = 0;
+ } else {
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+ "dump mode set failed\n");
+ error = -1;
+ }
+ }
+ }
+ return error;
+}
+
+/**
+ * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
+ * @instance: Adapter soft state
+ * @argp: User's ioctl packet
+ */
+static int
+megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ struct megasas_iocpacket __user * user_ioc,
+ struct megasas_iocpacket *ioc)
+{
+ struct megasas_sge32 *kern_sge32;
+ struct megasas_cmd *cmd;
+ void *kbuff_arr[MAX_IOCTL_SGE];
+ dma_addr_t buf_handle = 0;
+ int error = 0, i;
+ void *sense = NULL;
+ dma_addr_t sense_handle;
+ unsigned long *sense_ptr;
+
+ memset(kbuff_arr, 0, sizeof(kbuff_arr));
+
+ if (ioc->sge_count > MAX_IOCTL_SGE) {
+ printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
+ ioc->sge_count, MAX_IOCTL_SGE);
+ return -EINVAL;
+ }
+
+ cmd = megasas_get_cmd(instance);
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * User's IOCTL packet has 2 frames (maximum). Copy those two
+ * frames into our cmd's frames. cmd->frame's context will get
+ * overwritten when we copy from user's frames. So set that value
+ * alone separately
+ */
+ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+ cmd->frame->hdr.context = cpu_to_le32(cmd->index);
+ cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+ MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
+
+ if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+ error = megasas_set_crash_dump_params_ioctl(cmd);
+ megasas_return_cmd(instance, cmd);
+ return error;
+ }
+
+ /*
+ * The management interface between applications and the fw uses
+	 * MFI frames. E.g., RAID configuration changes, LD property changes,
+	 * etc. are accomplished through different kinds of MFI frames. The
+ * driver needs to care only about substituting user buffers with
+ * kernel buffers in SGLs. The location of SGL is embedded in the
+ * struct iocpacket itself.
+ */
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+
+ /*
+ * For each user buffer, create a mirror buffer and copy in
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (!ioc->sgl[i].iov_len)
+ continue;
+
+ kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
+ ioc->sgl[i].iov_len,
+ &buf_handle, GFP_KERNEL);
+ if (!kbuff_arr[i]) {
+ printk(KERN_DEBUG "megasas: Failed to alloc "
+			"kernel SGL buffer for IOCTL\n");
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * We don't change the dma_coherent_mask, so
+		 * dma_alloc_coherent only returns 32 bit addresses
+ */
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+
+ /*
+ * We created a kernel buffer corresponding to the
+ * user buffer. Now copy in from the user buffer
+ */
+ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
+ (u32) (ioc->sgl[i].iov_len))) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (ioc->sense_len) {
+ sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
+ &sense_handle, GFP_KERNEL);
+ if (!sense) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ sense_ptr =
+ (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
+ *sense_ptr = cpu_to_le32(sense_handle);
+ }
+
+ /*
+ * Set the sync_cmd flag so that the ISR knows not to complete this
+ * cmd to the SCSI mid-layer
+ */
+ cmd->sync_cmd = 1;
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+ cmd->sync_cmd = 0;
+
+ if (instance->unload == 1) {
+		dev_info(&instance->pdev->dev, "Driver unload is in progress; "
+ "don't submit data to application\n");
+ goto out;
+ }
+ /*
+ * copy out the kernel buffers to user buffers
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
+ ioc->sgl[i].iov_len)) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy out the sense
+ */
+ if (ioc->sense_len) {
+ /*
+ * sense_ptr points to the location that has the user
+ * sense buffer address
+ */
+ sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
+ ioc->sense_off);
+
+ if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+ sense, ioc->sense_len)) {
+ printk(KERN_ERR "megasas: Failed to copy out to user "
+ "sense data\n");
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy the status codes returned by the fw
+ */
+ if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
+ &cmd->frame->hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
+ error = -EFAULT;
+ }
+
+ out:
+ if (sense) {
+ dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
+ sense, sense_handle);
+ }
+
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (kbuff_arr[i])
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
+ kbuff_arr[i] = NULL;
+ }
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return error;
+}
+
+static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct megasas_iocpacket __user *user_ioc =
+ (struct megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket *ioc;
+ struct megasas_instance *instance;
+ int error;
+ int i;
+ unsigned long flags;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return -ENOMEM;
+
+ if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
+ error = -EFAULT;
+ goto out_kfree_ioc;
+ }
+
+ instance = megasas_lookup_instance(ioc->host_no);
+ if (!instance) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ /* Adjust ioctl wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Block ioctls in VF mode */
+ if (instance->requestorId && !allow_vf_ioctls) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ printk(KERN_ERR "Controller in crit error\n");
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (instance->unload == 1) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (down_interruptible(&instance->ioctl_sem)) {
+ error = -ERESTARTSYS;
+ goto out_kfree_ioc;
+ }
+
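+	/*
+	 * If a controller reset (OCR) is in progress, poll once a second
+	 * until the HBA is operational again before issuing the command.
+	 */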
+ for (i = 0; i < wait_time; i++) {
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: waiting "
+ "for controller reset to finish\n");
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		printk(KERN_ERR "megaraid_sas: timed out while "
+ "waiting for HBA to recover\n");
+ error = -ENODEV;
+ goto out_up;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+ out_up:
+ up(&instance->ioctl_sem);
+
+ out_kfree_ioc:
+ kfree(ioc);
+ return error;
+}
+
+static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
+{
+ struct megasas_instance *instance;
+ struct megasas_aen aen;
+ int error;
+ int i;
+ unsigned long flags;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+ if (file->private_data != file) {
+ printk(KERN_DEBUG "megasas: fasync_helper was not "
+ "called first\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
+ return -EFAULT;
+
+ instance = megasas_lookup_instance(aen.host_no);
+
+ if (!instance)
+ return -ENODEV;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ return -ENODEV;
+ }
+
+ if (instance->unload == 1) {
+ return -ENODEV;
+ }
+
+ for (i = 0; i < wait_time; i++) {
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock,
+ flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: waiting for "
+ "controller reset to finish\n");
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+		printk(KERN_ERR "megaraid_sas: timed out while waiting "
+ "for HBA to recover.\n");
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ mutex_lock(&instance->aen_mutex);
+ error = megasas_register_aen(instance, aen.seq_num,
+ aen.class_locale_word);
+ mutex_unlock(&instance->aen_mutex);
+ return error;
+}
+
+/**
+ * megasas_mgmt_ioctl - char node ioctl entry point
+ */
+static long
+megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE:
+ return megasas_mgmt_ioctl_fw(file, arg);
+
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+
+#ifdef CONFIG_COMPAT
+static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct compat_megasas_iocpacket __user *cioc =
+ (struct compat_megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket __user *ioc =
+ compat_alloc_user_space(sizeof(struct megasas_iocpacket));
+ int i;
+ int error = 0;
+ compat_uptr_t ptr;
+
+ if (clear_user(ioc, sizeof(*ioc)))
+ return -EFAULT;
+
+ if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
+ copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
+ copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
+ copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
+ return -EFAULT;
+
+ /*
+ * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+ * sense_len is not null, so prepare the 64bit value under
+ * the same condition.
+ */
+ if (ioc->sense_len) {
+ void __user **sense_ioc_ptr =
+ (void __user **)(ioc->frame.raw + ioc->sense_off);
+ compat_uptr_t *sense_cioc_ptr =
+ (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ if (get_user(ptr, sense_cioc_ptr) ||
+ put_user(compat_ptr(ptr), sense_ioc_ptr))
+ return -EFAULT;
+ }
+
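+	/*
+	 * Widen each 32-bit compat iov_base into a 64-bit user pointer;
+	 * iov_len is copied over as a compat_size_t.
+	 */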
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
+ if (get_user(ptr, &cioc->sgl[i].iov_base) ||
+ put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
+ copy_in_user(&ioc->sgl[i].iov_len,
+ &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
+ return -EFAULT;
+ }
+
+ error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
+
+ if (copy_in_user(&cioc->frame.hdr.cmd_status,
+ &ioc->frame.hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
+ return -EFAULT;
+ }
+ return error;
+}
+
+static long
+megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE32:
+ return megasas_mgmt_compat_ioctl_fw(file, arg);
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+#endif
+
+/*
+ * File operations structure for management interface
+ */
+static const struct file_operations megasas_mgmt_fops = {
+ .owner = THIS_MODULE,
+ .open = megasas_mgmt_open,
+ .fasync = megasas_mgmt_fasync,
+ .unlocked_ioctl = megasas_mgmt_ioctl,
+ .poll = megasas_mgmt_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = megasas_mgmt_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+/*
+ * PCI hotplug support registration structure
+ */
+static struct pci_driver megasas_pci_driver = {
+
+ .name = "megaraid_sas",
+ .id_table = megasas_pci_table,
+ .probe = megasas_probe_one,
+ .remove = megasas_detach_one,
+ .suspend = megasas_suspend,
+ .resume = megasas_resume,
+ .shutdown = megasas_shutdown,
+};
+
+/*
+ * Sysfs driver attributes
+ */
+static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
+{
+ return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
+ MEGASAS_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
+
+static ssize_t
+megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_poll_for_event);
+}
+
+static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
+ megasas_sysfs_show_support_poll_for_event, NULL);
+
+static ssize_t
+megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_device_change);
+}
+
+static DRIVER_ATTR(support_device_change, S_IRUGO,
+ megasas_sysfs_show_support_device_change, NULL);
+
+static ssize_t
+megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", megasas_dbg_lvl);
+}
+
+static ssize_t
+megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
+{
+ int retval = count;
+	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
+ printk(KERN_ERR "megasas: could not set dbg_lvl\n");
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
+ megasas_sysfs_set_dbg_lvl);
+
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+ struct megasas_aen_event *ev =
+ container_of(work, struct megasas_aen_event, hotplug_work.work);
+ struct megasas_instance *instance = ev->instance;
+ union megasas_evt_class_locale class_locale;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev1;
+ u16 pd_index = 0;
+ u16 ld_index = 0;
+ int i, j, doscan = 0;
+ u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
+ int error;
+
+ if (!instance) {
+ printk(KERN_ERR "invalid instance!\n");
+ kfree(ev);
+ return;
+ }
+
+ /* Adjust event workqueue thread wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Don't run the event workqueue thread if OCR is running */
+ for (i = 0; i < wait_time; i++) {
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: %s waiting for "
+ "controller reset to finish for scsi%d\n",
+ __func__, instance->host->host_no);
+ }
+ msleep(1000);
+ }
+
+ instance->ev = NULL;
+ host = instance->host;
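+	/*
+	 * Act on the completed event: resync the PD or LD list for
+	 * device add/remove events, or set doscan for a full rescan on
+	 * bus-level events.
+	 */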
+ if (instance->evt_detail) {
+
+ switch (le32_to_cpu(instance->evt_detail->code)) {
+ case MR_EVT_PD_INSERTED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+
+ if (sdev1)
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
+ case MR_EVT_PD_REMOVED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (sdev1) {
+ scsi_device_put(sdev1);
+ }
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
+ case MR_EVT_LD_DELETED:
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ }
+ break;
+ case MR_EVT_LD_CREATED:
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ }
+ }
+ doscan = 0;
+ }
+ break;
+ case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
+ case MR_EVT_LD_STATE_CHANGE:
+ doscan = 1;
+ break;
+ default:
+ doscan = 0;
+ break;
+ }
+ } else {
+ printk(KERN_ERR "invalid evt_detail!\n");
+ kfree(ev);
+ return;
+ }
+
+ if (doscan) {
+ printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+ instance->host->host_no);
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host,
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+ }
+
+	if (instance->aen_cmd != NULL) {
+		kfree(ev);
+		return;
+ }
+
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+
+ /* Register AEN with FW for latest sequence number plus 1 */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+ mutex_lock(&instance->aen_mutex);
+ error = megasas_register_aen(instance, seq_num,
+ class_locale.word);
+ mutex_unlock(&instance->aen_mutex);
+
+ if (error)
+ printk(KERN_ERR "register aen failed error %x\n", error);
+
+ kfree(ev);
+}
+
+/**
+ * megasas_init - Driver load entry point
+ */
+static int __init megasas_init(void)
+{
+ int rval;
+
+ /*
+ * Announce driver version and other information
+ */
+ pr_info("megasas: %s\n", MEGASAS_VERSION);
+
+ spin_lock_init(&poll_aen_lock);
+
+ support_poll_for_event = 2;
+ support_device_change = 1;
+
+ memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
+
+ /*
+ * Register character device node
+ */
+ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
+
+ if (rval < 0) {
+ printk(KERN_DEBUG "megasas: failed to open device node\n");
+ return rval;
+ }
+
+ megasas_mgmt_majorno = rval;
+
+ /*
+ * Register ourselves as PCI hotplug module
+ */
+ rval = pci_register_driver(&megasas_pci_driver);
+
+ if (rval) {
+		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
+ goto err_pcidrv;
+ }
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_version);
+ if (rval)
+ goto err_dcf_attr_ver;
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+ if (rval)
+ goto err_dcf_support_poll_for_event;
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+ if (rval)
+ goto err_dcf_dbg_lvl;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ if (rval)
+ goto err_dcf_support_device_change;
+
+ return rval;
+
+err_dcf_support_device_change:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+err_dcf_dbg_lvl:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+err_dcf_support_poll_for_event:
+ driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+err_dcf_attr_ver:
+ pci_unregister_driver(&megasas_pci_driver);
+err_pcidrv:
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+ return rval;
+}
+
+/**
+ * megasas_exit - Driver unload entry point
+ */
+static void __exit megasas_exit(void)
+{
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+
+ pci_unregister_driver(&megasas_pci_driver);
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+}
+
+module_init(megasas_init);
+module_exit(megasas_exit);
diff --git a/kernel/drivers/scsi/megaraid/megaraid_sas_fp.c b/kernel/drivers/scsi/megaraid/megaraid_sas_fp.c
new file mode 100644
index 000000000..4f7228786
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -0,0 +1,1360 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas_fp.c
+ *
+ * Authors: Avago Technologies
+ * Sumant Patro
+ * Varad Talamacki
+ * Manoj Jose
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+#include <asm/div64.h>
+
+#define LB_PENDING_CMDS_DEFAULT 4
+static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+module_param(lb_pending_cmds, int, S_IRUGO);
+MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
+ "threshold. Valid Values are 1-128. Default: 4");
+
+
+#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+#define MR_LD_STATE_OPTIMAL 3
+#define FALSE 0
+#define TRUE 1
+
+#define SPAN_DEBUG 0
+#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
+#define SPAN_INVALID 0xff
+
+/* Prototypes */
+static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo);
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+ u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
+static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
+ u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
+
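+/**
+ * mega_mod64 - Modulo of a 64-bit dividend by a 32-bit divisor
+ * @dividend:	Dividend
+ * @divisor:	Divisor
+ *
+ * @return remainder
+ */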
+u32 mega_mod64(u64 dividend, u32 divisor)
+{
+ u64 d;
+ u32 remainder;
+
+ if (!divisor)
+		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
+ d = dividend;
+ remainder = do_div(d, divisor);
+ return remainder;
+}
+
+/**
+ * mega_div64_32 - Divide a 64-bit dividend by a 32-bit divisor
+ * @dividend:	Dividend
+ * @divisor:	Divisor
+ *
+ * @return quotient
+ */
+u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+{
+ u32 remainder;
+ u64 d;
+
+ if (!divisor)
+		printk(KERN_ERR "megasas : DIVISOR is zero in div fn\n");
+
+ d = dividend;
+ remainder = do_div(d, divisor);
+
+ return d;
+}
+
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].ldRaid;
+}
+
+static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
+ struct MR_DRV_RAID_MAP_ALL
+ *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+}
+
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+}
+
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
+}
+
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
+}
+
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return map->raidMap.devHndlInfo[pd].curDevHdl;
+}
+
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+}
+
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
+}
+
+static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+}
+
+/*
+ * This function will populate the driver RAID map using the firmware RAID map
+ */
+void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+ struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
+ int i;
+ u16 ld_count;
+
+
+ struct MR_DRV_RAID_MAP_ALL *drv_map =
+ fusion->ld_drv_map[(instance->map_id & 1)];
+ struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
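+	/*
+	 * Controllers supporting 256 VDs provide the extended map, whose
+	 * layout matches the driver map and can be copied wholesale; the
+	 * legacy map is converted field by field below.
+	 */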
+ if (instance->supportmax256vd) {
+ memcpy(fusion->ld_drv_map[instance->map_id & 1],
+ fusion->ld_map[instance->map_id & 1],
+ fusion->current_map_sz);
+ /* New Raid map will not set totalSize, so keep expected value
+ * for legacy code in ValidateMapInfo
+ */
+ pDrvRaidMap->totalSize =
+ cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
+ } else {
+ fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+ fusion->ld_map[(instance->map_id & 1)];
+ pFwRaidMap = &fw_map_old->raidMap;
+ ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+
+#if VD_EXT_DEBUG
+ for (i = 0; i < ld_count; i++) {
+			dev_dbg(&instance->pdev->dev, "(%d): Index 0x%x "
+				"Target Id 0x%x Seq Num 0x%x Size 0x%llx\n",
+ instance->unique_id, i,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
+ }
+#endif
+
+ memset(drv_map, 0, fusion->drv_map_sz);
+ pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+ pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+ pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+ for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u8)pFwRaidMap->ldTgtIdToLd[i];
+ for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
+ i < MAX_LOGICAL_DRIVES_EXT; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
+ for (i = 0; i < ld_count; i++) {
+ pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev,
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
+ "raid map %p LD RAID MAP %p/%p\n", drv_map,
+ pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
+ &pDrvRaidMap->ldSpanMap[i].ldRaid);
+#endif
+ }
+ memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+ sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+ memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
+ }
+}
+
+/*
+ * This function will validate Map info data provided by FW
+ */
+u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ struct MR_DRV_RAID_MAP *pDrvRaidMap;
+ struct LD_LOAD_BALANCE_INFO *lbInfo;
+ PLD_SPAN_INFO ldSpanInfo;
+ struct MR_LD_RAID *raid;
+ u16 ldCount, num_lds;
+ u16 ld;
+ u32 expected_size;
+
+
+ MR_PopulateDrvRaidMap(instance);
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ pDrvRaidMap = &drv_map->raidMap;
+
+ lbInfo = fusion->load_balance_info;
+ ldSpanInfo = fusion->log_to_span;
+
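+	/*
+	 * The legacy FW map embeds a single MR_LD_SPAN_MAP, so the
+	 * expected size is the base structure minus that one entry plus
+	 * one span map per logical drive.
+	 */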
+ if (instance->supportmax256vd)
+ expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
+ else
+ expected_size =
+ (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
+
+ if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
+		dev_err(&instance->pdev->dev, "map info structure size 0x%x does not match the ld count\n",
+ (unsigned int) expected_size);
+ dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
+ (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+ le32_to_cpu(pDrvRaidMap->totalSize));
+ return 0;
+ }
+
+ if (instance->UnevenSpanSupport)
+ mr_update_span_set(drv_map, ldSpanInfo);
+
+ mr_update_load_balance_params(drv_map, lbInfo);
+
+ num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
+
+	/* Convert RAID capability values to CPU byte order */
+ for (ldCount = 0; ldCount < num_lds; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
+ le32_to_cpus((u32 *)&raid->capability);
+ }
+
+ return 1;
+}
+
+u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+ struct MR_QUAD_ELEMENT *quad;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 span, j;
+
+ for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
+
+ for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
+ quad = &pSpanBlock->block_span_info.quad[j];
+
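+			/*
+			 * A row belongs to this quad if it lies within
+			 * [logStart, logEnd] and is an exact multiple of
+			 * 'diff' rows past logStart.
+			 */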
+ if (le32_to_cpu(quad->diff) == 0)
+ return SPAN_INVALID;
+ if (le64_to_cpu(quad->logStart) <= row && row <=
+ le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
+ if (span_blk != NULL) {
+ u64 blk, debugBlk;
+ blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
+ debugBlk = blk;
+
+ blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* Function to print info about span set created in driver from FW raid map
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*/
+#if SPAN_DEBUG
+static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
+{
+
+ u8 span;
+ u32 element;
+ struct MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u16 ld;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
+ ld, raid->spanDepth);
+ for (span = 0; span < raid->spanDepth; span++)
+ dev_dbg(&instance->pdev->dev, "Span=%x,"
+ " number of quads=%x\n", span,
+ le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements));
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ if (span_set->span_row_data_width == 0)
+ break;
+
+			dev_dbg(&instance->pdev->dev, "Span Set %x: "
+ "width=%x, diff=%x\n", element,
+ (unsigned int)span_set->span_row_data_width,
+ (unsigned int)span_set->diff);
+			dev_dbg(&instance->pdev->dev, "logical LBA "
+ "start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->log_start_lba,
+ (long unsigned int)span_set->log_end_lba);
+ dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->span_row_start,
+ (long unsigned int)span_set->span_row_end);
+ dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->data_row_start,
+ (long unsigned int)span_set->data_row_end);
+ dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->data_strip_start,
+ (long unsigned int)span_set->data_strip_end);
+
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >=
+ element + 1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+					dev_dbg(&instance->pdev->dev, "Span=%x, "
+ "Quad=%x, diff=%x\n", span,
+ element, le32_to_cpu(quad->diff));
+ dev_dbg(&instance->pdev->dev,
+ "offset_in_span=0x%08lx\n",
+ (long unsigned int)le64_to_cpu(quad->offsetInSpan));
+ dev_dbg(&instance->pdev->dev,
+ "logical start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)le64_to_cpu(quad->logStart),
+ (long unsigned int)le64_to_cpu(quad->logEnd));
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+/*
+******************************************************************************
+*
+* This routine calculates the Span block for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+* div_error - Divide error code.
+*/
+
+u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+ u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ u32 span, info;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+
+ if (row > span_set->data_row_end)
+ continue;
+
+ for (span = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].
+ block_span_info.quad[info];
+ if (le32_to_cpu(quad->diff) == 0)
+ return SPAN_INVALID;
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
+ if (span_blk != NULL) {
+ u64 blk;
+ blk = mega_div64_32
+ ((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ blk = (blk + le64_to_cpu(quad->offsetInSpan))
+ << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* Strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* row - row associated with strip
+*/
+
+static u64 get_row_from_strip(struct megasas_instance *instance,
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 info, strip_offset, span, span_offset;
+ u64 span_set_Strip, span_set_Row, retval;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
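+		/*
+		 * Offset the strip into this span set, then split it into
+		 * a column (strip_offset) and a row within the set.
+		 */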
+ span_set_Strip = strip - span_set->data_strip_start;
+ strip_offset = mega_mod64(span_set_Strip,
+ span_set->span_row_data_width);
+ span_set_Row = mega_div64_32(span_set_Strip,
+ span_set->span_row_data_width) * span_set->diff;
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset++;
+ else
+ break;
+ }
+#if SPAN_DEBUG
+		dev_info(&instance->pdev->dev, "Strip 0x%llx, "
+			"span_set_Strip 0x%llx, span_set_Row 0x%llx "
+			"data width 0x%llx span offset 0x%x\n", strip,
+ (unsigned long long)span_set_Strip,
+ (unsigned long long)span_set_Row,
+ (unsigned long long)span_set->span_row_data_width,
+ span_offset);
+		dev_info(&instance->pdev->dev, "For strip 0x%llx "
+ "row is 0x%llx\n", strip,
+ (unsigned long long) span_set->data_row_start +
+ (unsigned long long) span_set_Row + (span_offset - 1));
+#endif
+ retval = (span_set->data_row_start + span_set_Row +
+ (span_offset - 1));
+ return retval;
+ }
+ return -1LLU;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the Start Strip for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* Strip - Start strip associated with row
+*/
+
+static u64 get_strip_from_row(struct megasas_instance *instance,
+ u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 span, info;
+ u64 strip;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (row > span_set->data_row_end)
+ continue;
+
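+		/*
+		 * Locate the quad covering this row, then map the row back
+		 * to the first strip of its span-set row.
+		 */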
+ for (span = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.quad[info];
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ mega_mod64((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff)) == 0) {
+ strip = mega_div64_32
+ (((row - span_set->data_row_start)
+ - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ strip *= span_set->span_row_data_width;
+ strip += span_set->data_strip_start;
+ strip += span_set->strip_offset[span];
+ return strip;
+ }
+ }
+ }
+	dev_err(&instance->pdev->dev, "get_strip_from_row "
+ "returns invalid strip for ld=%x, row=%lx\n",
+ ld, (long unsigned int)row);
+ return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the Physical Arm for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* Phys Arm - Phys Arm associated with strip
+*/
+
+static u32 get_arm_from_strip(struct megasas_instance *instance,
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 info, strip_offset, span, span_offset, retval;
+
+ for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
+ strip_offset = (uint)mega_mod64
+ ((strip - span_set->data_strip_start),
+ span_set->span_row_data_width);
+
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset =
+ span_set->strip_offset[span];
+ else
+ break;
+ }
+#if SPAN_DEBUG
+		dev_info(&instance->pdev->dev, "get_arm_from_strip: "
+ "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
+ (long unsigned int)strip, (strip_offset - span_offset));
+#endif
+ retval = (strip_offset - span_offset);
+ return retval;
+ }
+
+	dev_err(&instance->pdev->dev, "get_arm_from_strip "
+ "returns invalid arm for ld=%x strip=%lx\n",
+ ld, (long unsigned int)strip);
+
+ return -1;
+}
+
+/* This function will return the physical arm */
+u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ /* Need to check correct default value */
+ u32 arm = 0;
+
+ switch (raid->level) {
+ case 0:
+ case 5:
+ case 6:
+ arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+ break;
+ case 1:
+ /* start with logical arm */
+ arm = get_arm_from_strip(instance, ld, stripe, map);
+ if (arm != -1U)
+ arm *= 2;
+ break;
+ }
+
+ return arm;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+ u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 pd, arRef;
+ u8 physArm, span;
+ u64 row;
+ u8 retval = TRUE;
+ u8 do_invader = 0;
+ u64 *pdBlock = &io_info->pdBlock;
+ u16 *pDevHandle = &io_info->devHandle;
+ u32 logArm, rowMod, armQ, arm;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+ instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ do_invader = 1;
+
+	/* Get row and span from io_info for Uneven Span IO. */
+ row = io_info->start_row;
+ span = io_info->start_span;
+
+
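+	/*
+	 * RAID-6: data arms logically follow the Q arm in the span row,
+	 * so rotate the logical arm past Q and wrap at the span row size.
+	 */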
+ if (raid->level == 6) {
+ logArm = get_arm_from_strip(instance, ld, stripRow, map);
+ if (logArm == -1U)
+ return FALSE;
+ rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+ armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
+ arm = armQ + 1 + logArm;
+ if (arm >= SPAN_ROW_SIZE(map, ld, span))
+ arm -= SPAN_ROW_SIZE(map, ld, span);
+ physArm = (u8)arm;
+ } else
+ /* Calculate the arm */
+ physArm = get_arm(instance, ld, span, stripRow, map);
+ if (physArm == 0xFF)
+ return FALSE;
+
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map);
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID;
+ if ((raid->level >= 5) &&
+ (!do_invader || (do_invader &&
+ (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+ physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
+ return retval;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe.
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+ u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 pd, arRef;
+ u8 physArm, span;
+ u64 row;
+ u8 retval = TRUE;
+ u8 do_invader = 0;
+ u64 *pdBlock = &io_info->pdBlock;
+ u16 *pDevHandle = &io_info->devHandle;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+ instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ do_invader = 1;
+
+ row = mega_div64_32(stripRow, raid->rowDataSize);
+
+ if (raid->level == 6) {
+ /* logical arm within row */
+ u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
+ u32 rowMod, armQ, arm;
+
+ if (raid->rowSize == 0)
+ return FALSE;
+ /* get logical row mod */
+ rowMod = mega_mod64(row, raid->rowSize);
+ armQ = raid->rowSize-1-rowMod; /* index of Q drive */
+ arm = armQ+1+logArm; /* data always logically follows Q */
+ if (arm >= raid->rowSize) /* handle wrap condition */
+ arm -= raid->rowSize;
+ physArm = (u8)arm;
+ } else {
+ if (raid->modFactor == 0)
+ return FALSE;
+ physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
+ raid->modFactor),
+ map);
+ }
+
+ if (raid->spanDepth == 1) {
+ span = 0;
+ *pdBlock = row << raid->stripeShift;
+ } else {
+ span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
+ if (span == SPAN_INVALID)
+ return FALSE;
+ }
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
+
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd. */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
+ if ((raid->level >= 5) &&
+ (!do_invader || (do_invader &&
+ (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ /* Get alternate Pd. */
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+ physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
+ return retval;
+}
+
+/*
+******************************************************************************
+*
+* MR_BuildRaidContext function
+*
+* This function will initiate command processing. The start/end row and strip
+* information is calculated, then the lock is acquired.
+* This function will return 0 if region lock was acquired OR return num strips
+*/
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
+{
+ struct MR_LD_RAID *raid;
+ u32 ld, stripSize, stripe_mask;
+ u64 endLba, endStrip, endRow, start_row, start_strip;
+ u64 regStart;
+ u32 regSize;
+ u8 num_strips, numRows;
+ u16 ref_in_start_stripe, ref_in_end_stripe;
+ u64 ldStartBlock;
+ u32 numBlocks, ldTgtId;
+ u8 isRead;
+ u8 retval = 0;
+ u8 startlba_span = SPAN_INVALID;
+ u64 *pdBlock = &io_info->pdBlock;
+
+ ldStartBlock = io_info->ldStartBlock;
+ numBlocks = io_info->numBlocks;
+ ldTgtId = io_info->ldTgtId;
+ isRead = io_info->isRead;
+ io_info->IoforUnevenSpan = 0;
+ io_info->start_span = SPAN_INVALID;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+ raid = MR_LdRaidGet(ld, map);
+
+ /*
+	 * If rowDataSize in the RAID map and spanRowDataSize in the SPAN
+	 * INFO are both zero, return FALSE.
+ */
+ if (raid->rowDataSize == 0) {
+ if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+ return FALSE;
+ else if (instance->UnevenSpanSupport) {
+ io_info->IoforUnevenSpan = 1;
+ } else {
+ dev_info(&instance->pdev->dev,
+				"raid->rowDataSize is 0, but has SPAN[0] "
+				"rowDataSize = 0x%0x, "
+				"but there is _NO_ UnevenSpanSupport\n",
+ MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+ return FALSE;
+ }
+ }
+
+ stripSize = 1 << raid->stripeShift;
+ stripe_mask = stripSize-1;
+
+
+ /*
+ * calculate starting row and stripe, and number of strips and rows
+ */
+ start_strip = ldStartBlock >> raid->stripeShift;
+ ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
+ endLba = ldStartBlock + numBlocks - 1;
+ ref_in_end_stripe = (u16)(endLba & stripe_mask);
+ endStrip = endLba >> raid->stripeShift;
+ num_strips = (u8)(endStrip - start_strip + 1); /* End strip */
+
+ if (io_info->IoforUnevenSpan) {
+ start_row = get_row_from_strip(instance, ld, start_strip, map);
+ endRow = get_row_from_strip(instance, ld, endStrip, map);
+ if (start_row == -1ULL || endRow == -1ULL) {
+			dev_info(&instance->pdev->dev, "return from %s %d. "
+ "Send IO w/o region lock.\n",
+ __func__, __LINE__);
+ return FALSE;
+ }
+
+ if (raid->spanDepth == 1) {
+ startlba_span = 0;
+ *pdBlock = start_row << raid->stripeShift;
+ } else
+ startlba_span = (u8)mr_spanset_get_span_block(instance,
+ ld, start_row, pdBlock, map);
+ if (startlba_span == SPAN_INVALID) {
+			dev_info(&instance->pdev->dev, "return from %s %d "
+				"for row 0x%llx, start strip %llx "
+				"end strip %llx\n", __func__, __LINE__,
+ (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip);
+ return FALSE;
+ }
+ io_info->start_span = startlba_span;
+ io_info->start_row = start_row;
+#if SPAN_DEBUG
+		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d "
+ "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
+ " span 0x%x\n", __func__, __LINE__,
+ (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip, startlba_span);
+		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx "
+ "Start span 0x%x\n", (unsigned long long)start_row,
+ (unsigned long long)endRow, startlba_span);
+#endif
+ } else {
+ start_row = mega_div64_32(start_strip, raid->rowDataSize);
+ endRow = mega_div64_32(endStrip, raid->rowDataSize);
+ }
+ numRows = (u8)(endRow - start_row + 1);
+
+ /*
+ * calculate region info.
+ */
+
+ /* assume region is at the start of the first row */
+ regStart = start_row << raid->stripeShift;
+ /* assume this IO needs the full row - we'll adjust if not true */
+ regSize = stripSize;
+
+ /* Check if we can send this I/O via FastPath */
+ if (raid->capability.fpCapable) {
+ if (isRead)
+ io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpReadAcrossStripe));
+ else
+ io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpWriteAcrossStripe));
+ } else
+ io_info->fpOkForIo = FALSE;
+
+ if (numRows == 1) {
+ /* single-strip IOs can always lock only the data needed */
+ if (num_strips == 1) {
+ regStart += ref_in_start_stripe;
+ regSize = numBlocks;
+ }
+		/* multi-strip IOs always need the full stripe locked */
+ } else if (io_info->IoforUnevenSpan == 0) {
+ /*
+ * For Even span region lock optimization.
+ * If the start strip is the last in the start row
+ */
+ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+ regStart += ref_in_start_stripe;
+ /* initialize count to sectors from startref to end
+ of strip */
+ regSize = stripSize - ref_in_start_stripe;
+ }
+
+ /* add complete rows in the middle of the transfer */
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row*/
+ if (endStrip == endRow*raid->rowDataSize)
+ regSize += ref_in_end_stripe+1;
+ else
+ regSize += stripSize;
+ } else {
+ /*
+ * For Uneven span region lock optimization.
+ * If the start strip is the last in the start row
+ */
+ if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
+ SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+ regStart += ref_in_start_stripe;
+ /* initialize count to sectors from
+ * startRef to end of strip
+ */
+ regSize = stripSize - ref_in_start_stripe;
+ }
+ /* Add complete rows in the middle of the transfer */
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row */
+ if (endStrip == get_strip_from_row(instance, ld, endRow, map))
+ regSize += ref_in_end_stripe + 1;
+ else
+ regSize += stripSize;
+ }
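+
+ /*
+ * Continuing the example above for the even-span branch, with an
+ * assumed rowDataSize of 4 strips: start_row = 7/4 = 1, endRow =
+ * 11/4 = 2, numRows = 2. Strip 7 is the last strip of row 1
+ * (2 * 4 - 1 = 7), so regStart += 104 and regSize = 128 - 104 = 24;
+ * there are no middle rows, and since endStrip (11) != endRow *
+ * rowDataSize (8) the last row contributes a full strip, giving
+ * regSize = 24 + 128 = 152 blocks.
+ */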
+
+ pRAID_Context->timeoutValue =
+ cpu_to_le16(raid->fpIoTimeoutForLd ?
+ raid->fpIoTimeoutForLd :
+ map->raidMap.fpPdIoTimeoutSec);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ pRAID_Context->regLockFlags = (isRead) ?
+ raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+ else
+ pRAID_Context->regLockFlags = (isRead) ?
+ REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+ pRAID_Context->VirtualDiskTgtId = raid->targetId;
+ pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
+ pRAID_Context->regLockLength = cpu_to_le32(regSize);
+ pRAID_Context->configSeqNum = raid->seqNum;
+ /* save pointer to raid->LUN array */
+ *raidLUN = raid->LUN;
+
+
+ /*
+ * Get Phy Params only if FP capable, or else leave it to MR firmware
+ * to do the calculation.
+ */
+ if (io_info->fpOkForIo) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(instance, ld,
+ start_strip, ref_in_start_stripe,
+ io_info, pRAID_Context, map) :
+ MR_GetPhyParams(instance, ld, start_strip,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map);
+ /* If IO on an invalid Pd, then FP is not possible.*/
+ if (io_info->devHandle == MR_PD_INVALID)
+ io_info->fpOkForIo = FALSE;
+ return retval;
+ } else if (isRead) {
+ uint stripIdx;
+ for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(instance, ld,
+ start_strip + stripIdx,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map) :
+ MR_GetPhyParams(instance, ld,
+ start_strip + stripIdx, ref_in_start_stripe,
+ io_info, pRAID_Context, map);
+ if (!retval)
+ return TRUE;
+ }
+ }
+
+#if SPAN_DEBUG
+ /* Just for testing what arm we get for strip.*/
+ if (io_info->IoforUnevenSpan)
+ get_arm_from_strip(instance, ld, start_strip, map);
+#endif
+ return TRUE;
+}
+
+/*
+******************************************************************************
+*
+* This routine prepares the span set info from a valid RAID map and stores
+* it in the local per-instance copy of ldSpanInfo.
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
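+/*
+ * Illustration (values assumed): a quad with logStart = 0, logEnd = 1023
+ * and diff = 1 covers span_row = (1023 - 0 + 1) / 1 = 1024 span rows.
+ * span_set[0] then starts its LBA/strip/row ranges at zero, and each later
+ * span_set element continues one past the end of the previous element's
+ * ranges, as computed in the else branch below.
+ */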
+void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
+{
+ u8 span, count;
+ u32 element, span_row_width;
+ u64 span_row;
+ struct MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set, *span_set_prev;
+ struct MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u16 ld;
+
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) <
+ element + 1)
+ continue;
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+
+ span_set->diff = le32_to_cpu(quad->diff);
+
+ for (count = 0, span_row_width = 0;
+ count < raid->spanDepth; count++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
+ spanBlock[count].
+ block_span_info.
+ noElements) >= element + 1) {
+ span_set->strip_offset[count] =
+ span_row_width;
+ span_row_width +=
+ MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize;
+ printk(KERN_INFO "megasas: "
+ "span %x rowDataSize %x\n",
+ count, MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize);
+ }
+ }
+
+ span_set->span_row_data_width = span_row_width;
+ span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+ le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+ le32_to_cpu(quad->diff));
+
+ if (element == 0) {
+ span_set->log_start_lba = 0;
+ span_set->log_end_lba =
+ ((span_row << raid->stripeShift)
+ * span_row_width) - 1;
+
+ span_set->span_row_start = 0;
+ span_set->span_row_end = span_row - 1;
+
+ span_set->data_strip_start = 0;
+ span_set->data_strip_end =
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start = 0;
+ span_set->data_row_end =
+ (span_row * le32_to_cpu(quad->diff)) - 1;
+ } else {
+ span_set_prev = &(ldSpanInfo[ld].
+ span_set[element - 1]);
+ span_set->log_start_lba =
+ span_set_prev->log_end_lba + 1;
+ span_set->log_end_lba =
+ span_set->log_start_lba +
+ ((span_row << raid->stripeShift)
+ * span_row_width) - 1;
+
+ span_set->span_row_start =
+ span_set_prev->span_row_end + 1;
+ span_set->span_row_end =
+ span_set->span_row_start + span_row - 1;
+
+ span_set->data_strip_start =
+ span_set_prev->data_strip_end + 1;
+ span_set->data_strip_end =
+ span_set->data_strip_start +
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start =
+ span_set_prev->data_row_end + 1;
+ span_set->data_row_end =
+ span_set->data_row_start +
+ (span_row * le32_to_cpu(quad->diff)) - 1;
+ }
+ break;
+ }
+ if (span == raid->spanDepth)
+ break;
+ }
+ }
+#if SPAN_DEBUG
+ getSpanInfo(map, ldSpanInfo);
+#endif
+
+}
+
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo)
+{
+ int ldCount;
+ u16 ld;
+ struct MR_LD_RAID *raid;
+
+ if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
+ lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+
+ raid = MR_LdRaidGet(ld, drv_map);
+ if ((raid->level != 1) ||
+ (raid->ldState != MR_LD_STATE_OPTIMAL)) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
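+ /* Only RAID1 LDs in the optimal state qualify for load balancing */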
+ lbInfo[ldCount].loadBalanceFlag = 1;
+ }
+}
+
+u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+{
+ struct fusion_context *fusion;
+ struct MR_LD_RAID *raid;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ u16 pend0, pend1, ld;
+ u64 diff0, diff1;
+ u8 bestArm, pd0, pd1, span, arm;
+ u32 arRef, span_row_size;
+
+ u64 block = io_info->ldStartBlock;
+ u32 count = io_info->numBlocks;
+
+ span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
+ >> RAID_CTX_SPANARM_SPAN_SHIFT);
+ arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
+
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
+ span_row_size = instance->UnevenSpanSupport ?
+ SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
+
+ arRef = MR_LdSpanArrayGet(ld, span, drv_map);
+ pd0 = MR_ArPdGet(arRef, arm, drv_map);
+ pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
+ (arm + 1 - span_row_size) : arm + 1, drv_map);
+
+ /* get the pending cmds for the data and mirror arms */
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+
+ /* Determine the disk whose head is nearer to the req. block */
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+ bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+
+ if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
+ (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
+ bestArm ^= 1;
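+
+ /*
+ * Example (values assumed): for block = 5000 with
+ * last_accessed_block[pd0] = 4000 and last_accessed_block[pd1] = 9000,
+ * diff0 = 1000 < diff1 = 4000 selects the data arm; but if pd0 already
+ * has more than pend1 + lb_pending_cmds commands outstanding, the IO
+ * is steered to the mirror arm instead.
+ */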
+
+ /* Update the last accessed block on the correct pd */
+ io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+ lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
+ io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+#if SPAN_DEBUG
+ if (arm != bestArm)
+ dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
+ "occur - span 0x%x arm 0x%x bestArm 0x%x "
+ "io_info->span_arm 0x%x\n",
+ span, arm, bestArm, io_info->span_arm);
+#endif
+ return io_info->pd_after_lb;
+}
+
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+{
+ u8 arm_pd;
+ u16 devHandle;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+
+ /* get best new arm (PD ID) */
+ arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info);
+ devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+ atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
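+ /*
+ * The increment above is balanced by a decrement when the IO
+ * completes, in the completion path (not part of this function).
+ */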
+ return devHandle;
+}
diff --git a/kernel/drivers/scsi/megaraid/megaraid_sas_fusion.c b/kernel/drivers/scsi/megaraid/megaraid_sas_fusion.c
new file mode 100644
index 000000000..5a0800d19
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -0,0 +1,2993 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas_fusion.c
+ *
+ * Authors: Avago Technologies
+ * Sumant Patro
+ * Adam Radford
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_dbg.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+extern void megasas_free_cmds(struct megasas_instance *instance);
+extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+ *instance);
+extern void
+megasas_complete_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, u8 alt_status);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
+
+void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
+int megasas_alloc_cmds(struct megasas_instance *instance);
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+int
+megasas_issue_polled(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+void megaraid_sas_kill_hba(struct megasas_instance *instance);
+
+extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
+extern int resetwaittime;
+
+
+
+/**
+ * megasas_enable_intr_fusion - Enables interrupts
+ * @instance: Adapter soft state
+ */
+void
+megasas_enable_intr_fusion(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+
+ instance->mask_interrupts = 0;
+ /* For Thunderbolt/Invader also clear intr on enable */
+ writel(~0, &regs->outbound_intr_status);
+ readl(&regs->outbound_intr_status);
+
+ writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_fusion - Disables interrupts
+ * @instance: Adapter soft state
+ */
+void
+megasas_disable_intr_fusion(struct megasas_instance *instance)
+{
+ u32 mask = 0xFFFFFFFF;
+ u32 status;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ instance->mask_interrupts = 1;
+
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ status = readl(&regs->outbound_intr_mask);
+}
+
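+/**
+ * megasas_clear_intr_fusion - Check for and clear our pending interrupt
+ * @regs: MFI register set
+ *
+ * Returns 1 if the interrupt belongs to this adapter (clearing any latched
+ * status on the way), 0 otherwise.
+ */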
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & 1) {
+ writel(status, &regs->outbound_intr_status);
+ readl(&regs->outbound_intr_status);
+ return 1;
+ }
+ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * megasas_get_cmd_fusion - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
+ *instance)
+{
+ unsigned long flags;
+ struct fusion_context *fusion =
+ (struct fusion_context *)instance->ctrl_context;
+ struct megasas_cmd_fusion *cmd = NULL;
+
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
+
+ if (!list_empty(&fusion->cmd_pool)) {
+ cmd = list_entry((&fusion->cmd_pool)->next,
+ struct megasas_cmd_fusion, list);
+ list_del_init(&cmd->list);
+ } else {
+ printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
+ }
+
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
+ return cmd;
+}
+
+/**
+ * megasas_return_cmd_fusion - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd)
+{
+ unsigned long flags;
+ struct fusion_context *fusion =
+ (struct fusion_context *)instance->ctrl_context;
+
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
+
+ cmd->scmd = NULL;
+ cmd->sync_cmd_idx = (u32)ULONG_MAX;
+ memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+ list_add(&cmd->list, (&fusion->cmd_pool)->next);
+
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
+}
+
+/**
+ * megasas_return_mfi_mpt_pthr - Return an MFI and an MPT cmd to the free pools
+ * @instance: Adapter soft state
+ * @cmd_mfi: MFI Command packet to be returned to free command pool
+ * @cmd_fusion: MPT Command packet to be returned to free command pool
+ */
+inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi,
+ struct megasas_cmd_fusion *cmd_fusion)
+{
+ unsigned long flags;
+
+ /*
+ * TO DO: optimize this code and use only one lock instead of two
+ * locks being used currently- mpt_pool_lock is acquired
+ * inside mfi_pool_lock
+ */
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
+ dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
+ __func__, __LINE__);
+ atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
+ __megasas_return_cmd(instance, cmd_mfi);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
+/**
+ * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
+ * @instance: Adapter soft state
+ */
+static void megasas_teardown_frame_pool_fusion(
+ struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ u16 max_cmd = instance->max_fw_cmds;
+
+ struct megasas_cmd_fusion *cmd;
+
+ if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
+ printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
+ "sense pool : %p\n", fusion->sg_dma_pool,
+ fusion->sense_dma_pool);
+ return;
+ }
+
+ /*
+ * Return all frames to pool
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = fusion->cmd_list[i];
+
+ if (cmd->sg_frame)
+ pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+
+ if (cmd->sense)
+ pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+
+ /*
+ * Now destroy the pool itself
+ */
+ pci_pool_destroy(fusion->sg_dma_pool);
+ pci_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sg_dma_pool = NULL;
+ fusion->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
+ * @instance: Adapter soft state
+ */
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ u32 max_cmds, req_sz, reply_sz, io_frames_sz;
+
+
+ req_sz = fusion->request_alloc_sz;
+ reply_sz = fusion->reply_alloc_sz;
+ io_frames_sz = fusion->io_frames_alloc_sz;
+
+ max_cmds = instance->max_fw_cmds;
+
+ /* Free descriptors and request Frames memory */
+ if (fusion->req_frames_desc)
+ dma_free_coherent(&instance->pdev->dev, req_sz,
+ fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+
+ if (fusion->reply_frames_desc) {
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc,
+ fusion->reply_frames_desc_phys);
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+ }
+
+ if (fusion->io_request_frames) {
+ pci_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ pci_pool_destroy(fusion->io_request_frames_pool);
+ }
+
+ /* Free the Fusion frame pool */
+ megasas_teardown_frame_pool_fusion(instance);
+
+ /* Free all the commands in the cmd_list */
+ for (i = 0; i < max_cmds; i++)
+ kfree(fusion->cmd_list[i]);
+
+ /* Free the cmd_list buffer itself */
+ kfree(fusion->cmd_list);
+ fusion->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&fusion->cmd_pool);
+}
+
+/**
+ * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
+ * @instance: Adapter soft state
+ *
+ */
+static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ u32 total_sz_chain_frame;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_fw_cmds;
+
+ total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME;
+
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+
+ fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion",
+ instance->pdev,
+ total_sz_chain_frame, 4,
+ 0);
+ if (!fusion->sg_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup request pool "
+ "fusion\n");
+ return -ENOMEM;
+ }
+ fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
+ instance->pdev,
+ SCSI_SENSE_BUFFERSIZE, 64, 0);
+
+ if (!fusion->sense_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup sense pool "
+ "fusion\n");
+ pci_pool_destroy(fusion->sg_dma_pool);
+ fusion->sg_dma_pool = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate and attach a frame to each of the commands in cmd_list
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = fusion->cmd_list[i];
+
+ cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
+ GFP_KERNEL,
+ &cmd->sg_frame_phys_addr);
+
+ cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+ /*
+ * megasas_teardown_frame_pool_fusion() takes care of freeing
+ * whatever has been allocated
+ */
+ if (!cmd->sg_frame || !cmd->sense) {
+ printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
+ megasas_teardown_frame_pool_fusion(instance);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * megasas_alloc_cmds_fusion - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed.
+ * In this driver, the 32-bit values are the indices into the cmd_list array.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * Commands are formed in the io_request and sg_frame members of
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as the SMID of the cmd.
+ * SMID values range from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+ int i, j, count;
+ u32 max_cmd, io_frames_sz;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ u32 offset;
+ dma_addr_t io_req_base_phys;
+ u8 *io_req_base;
+
+ fusion = instance->ctrl_context;
+
+ max_cmd = instance->max_fw_cmds;
+
+ fusion->req_frames_desc =
+ dma_alloc_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
+
+ if (!fusion->req_frames_desc) {
+ printk(KERN_ERR "megasas; Could not allocate memory for "
+ "request_frames\n");
+ goto fail_req_desc;
+ }
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ fusion->reply_frames_desc_pool =
+ pci_pool_create("reply_frames pool", instance->pdev,
+ fusion->reply_alloc_sz * count, 16, 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ printk(KERN_ERR "megasas; Could not allocate memory for "
+ "reply_frame pool\n");
+ goto fail_reply_desc;
+ }
+
+ fusion->reply_frames_desc =
+ pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
+ &fusion->reply_frames_desc_phys);
+ if (!fusion->reply_frames_desc) {
+ printk(KERN_ERR "megasas; Could not allocate memory for "
+ "reply_frame pool\n");
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+ goto fail_reply_desc;
+ }
+
+ reply_desc = fusion->reply_frames_desc;
+ for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
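+ /*
+ * All-ones (ULLONG_MAX) marks a reply descriptor slot as unused, so
+ * the driver can tell it apart from a descriptor the FW has posted.
+ */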
+
+ io_frames_sz = fusion->io_frames_alloc_sz;
+
+ fusion->io_request_frames_pool =
+ pci_pool_create("io_request_frames pool", instance->pdev,
+ fusion->io_frames_alloc_sz, 16, 0);
+
+ if (!fusion->io_request_frames_pool) {
+ printk(KERN_ERR "megasas: Could not allocate memory for "
+ "io_request_frame pool\n");
+ goto fail_io_frames;
+ }
+
+ fusion->io_request_frames =
+ pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ printk(KERN_ERR "megasas: Could not allocate memory for "
+ "io_request_frames frames\n");
+ pci_pool_destroy(fusion->io_request_frames_pool);
+ goto fail_io_frames;
+ }
+
+ /*
+ * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
+ * max_cmd, GFP_KERNEL);
+
+ if (!fusion->cmd_list) {
+ printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
+ "memory for cmd_list_fusion\n");
+ goto fail_cmd_list;
+ }
+
+ max_cmd = instance->max_fw_cmds;
+ for (i = 0; i < max_cmd; i++) {
+ fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
+ GFP_KERNEL);
+ if (!fusion->cmd_list[i]) {
+ printk(KERN_ERR "Could not alloc cmd list fusion\n");
+
+ for (j = 0; j < i; j++)
+ kfree(fusion->cmd_list[j]);
+
+ kfree(fusion->cmd_list);
+ fusion->cmd_list = NULL;
+ goto fail_cmd_list;
+ }
+ }
+
+ /* The first 256 bytes (SMID 0) are not used. Don't add to cmd list */
+ io_req_base = fusion->io_request_frames +
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = fusion->io_request_frames_phys +
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+
+ /*
+ * Add all the commands to command pool (fusion->cmd_pool)
+ */
+
+ /* SMID 0 is reserved. Set SMID/index from 1 */
+ for (i = 0; i < max_cmd; i++) {
+ cmd = fusion->cmd_list[i];
+ offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+ memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
+ cmd->index = i + 1;
+ cmd->scmd = NULL;
+ cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */
+ cmd->instance = instance;
+ cmd->io_request =
+ (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ (io_req_base + offset);
+ memset(cmd->io_request, 0,
+ sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+ cmd->io_request_phys_addr = io_req_base_phys + offset;
+
+ list_add_tail(&cmd->list, &fusion->cmd_pool);
+ }
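+
+ /*
+ * Resulting layout: io_request_frames holds one
+ * MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE (256-byte) frame per SMID,
+ * frame 0 being the reserved SMID 0; cmd_list[i], carrying SMID i + 1,
+ * points at io_req_base + 256 * i, i.e. frame i + 1.
+ */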
+
+ /*
+ * Create a frame pool and assign one frame to each cmd
+ */
+ if (megasas_create_frame_pool_fusion(instance)) {
+ printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ megasas_free_cmds_fusion(instance);
+ goto fail_req_desc;
+ }
+
+ return 0;
+
+fail_cmd_list:
+ pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ pci_pool_destroy(fusion->io_request_frames_pool);
+fail_io_frames:
+ /*
+ * reply_frames_desc comes from reply_frames_desc_pool, so it must be
+ * released via pci_pool_free() only; freeing it with
+ * dma_free_coherent() as well would be a double free.
+ */
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc,
+ fusion->reply_frames_desc_phys);
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+fail_reply_desc:
+ dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
+ fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+fail_req_desc:
+ return -ENOMEM;
+}
+
+/**
+ * wait_and_poll - Issues a polling command
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be issued
+ * @seconds: Maximum time to poll before timing out
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds)
+{
+ int i;
+ struct megasas_header *frame_hdr = &cmd->frame->hdr;
+ struct fusion_context *fusion;
+
+ u32 msecs = seconds * 1000;
+
+ fusion = instance->ctrl_context;
+ /*
+ * Wait for cmd_status to change
+ */
+ for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
+ rmb();
+ msleep(20);
+ }
+
+ if (frame_hdr->cmd_status == 0xff) {
+ if (fusion)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+/**
+ * megasas_ioc_init_fusion - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * Issues the IOC Init cmd
+ */
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance)
+{
+ struct megasas_init_frame *init_frame;
+ struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
+ dma_addr_t ioc_init_handle;
+ struct megasas_cmd *cmd;
+ u8 ret;
+ struct fusion_context *fusion;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
+ int i;
+ struct megasas_header *frame_hdr;
+
+ fusion = instance->ctrl_context;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
+ ret = 1;
+ goto fail_get_cmd;
+ }
+
+ IOCInitMessage =
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ &ioc_init_handle, GFP_KERNEL);
+
+ if (!IOCInitMessage) {
+ printk(KERN_ERR "Could not allocate memory for "
+ "IOCInitMessage\n");
+ ret = 1;
+ goto fail_fw_init;
+ }
+
+ memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+ IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
+ IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+ IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+ IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+
+ IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+ IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+ IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+
+ frame_hdr = &cmd->frame->hdr;
+ frame_hdr->cmd_status = 0xFF;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+
+ /* driver support Extended MSIX */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ init_frame->driver_operations.
+ mfi_capabilities.support_additional_msix = 1;
+ /* driver supports HA / Remote LUN over Fast Path interface */
+ init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.support_max_255lds
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw
+ = 1;
+ /* Convert capability to LE32 */
+ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
+
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(ioc_init_handle));
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(ioc_init_handle));
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+ req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
+ req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
+ req_desc.MFAIo.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ /*
+ * disable the intr before firing the init frame
+ */
+ instance->instancet->disable_intr(instance);
+
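+ /* Wait (up to ~10 s) for the FW doorbell handshake bit to clear */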
+ for (i = 0; i < (10 * 1000); i += 20) {
+ if (readl(&instance->reg_set->doorbell) & 1)
+ msleep(20);
+ else
+ break;
+ }
+
+ instance->instancet->fire_cmd(instance, req_desc.u.low,
+ req_desc.u.high, instance->reg_set);
+
+ wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
+
+ frame_hdr = &cmd->frame->hdr;
+ if (frame_hdr->cmd_status != 0) {
+ ret = 1;
+ goto fail_fw_init;
+ }
+ printk(KERN_ERR "megasas:IOC Init cmd success\n");
+
+ ret = 0;
+
+fail_fw_init:
+ megasas_return_cmd(instance, cmd);
+ if (IOCInitMessage)
+ dma_free_coherent(&instance->pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ IOCInitMessage, ioc_init_handle);
+fail_get_cmd:
+ return ret;
+}
+
+/*
+ * megasas_get_ld_map_info - Returns FW's ld_map structure
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's LD map structure.
+ * This information is mainly used by the driver to decide whether an IO
+ * can be sent via the fast path.
+ * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
+ * dcmd.mbox.b[0] - number of LDs being sync'd
+ * dcmd.mbox.b[1] - 0 - complete command immediately.
+ * - 1 - pend till config change
+ * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
+ * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
+ * uses extended struct MR_FW_RAID_MAP_EXT
+ */
+static int
+megasas_get_ld_map_info(struct megasas_instance *instance)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ void *ci;
+ dma_addr_t ci_h = 0;
+ u32 size_map_info;
+ struct fusion_context *fusion;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
+ return -ENOMEM;
+ }
+
+ fusion = instance->ctrl_context;
+
+ if (!fusion) {
+ megasas_return_cmd(instance, cmd);
+ return -ENXIO;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_map_info = fusion->current_map_sz;
+
+ ci = (void *) fusion->ld_map[(instance->map_id & 1)];
+ ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, fusion->max_map_sz);
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
+ __func__, cpu_to_le32(size_map_info));
+#endif
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+u8
+megasas_get_map_info(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ fusion->fast_path_io = 0;
+ if (!megasas_get_ld_map_info(instance)) {
+ if (MR_ValidateMapInfo(instance)) {
+ fusion->fast_path_io = 1;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * megasas_sync_map_info - Syncs LD map info with the FW
+ * @instance: Adapter soft state
+ *
+ * Issues an internal, pended command (DCMD) that hands the FW the driver's
+ * current per-LD target ID and sequence number info; the FW keeps the
+ * command pending until the RAID configuration changes.
+ */
+int
+megasas_sync_map_info(struct megasas_instance *instance)
+{
+ int ret = 0, i;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ u32 size_sync_info, num_lds;
+ struct fusion_context *fusion;
+ struct MR_LD_TARGET_SYNC *ci = NULL;
+ struct MR_DRV_RAID_MAP_ALL *map;
+ struct MR_LD_RAID *raid;
+ struct MR_LD_TARGET_SYNC *ld_sync;
+ dma_addr_t ci_h = 0;
+ u32 size_map_info;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get cmd for sync"
+ "info.\n");
+ return -ENOMEM;
+ }
+
+ fusion = instance->ctrl_context;
+
+ if (!fusion) {
+ megasas_return_cmd(instance, cmd);
+ return 1;
+ }
+
+ map = fusion->ld_drv_map[instance->map_id & 1];
+
+ num_lds = le16_to_cpu(map->raidMap.ldCount);
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ ci = (struct MR_LD_TARGET_SYNC *)
+ fusion->ld_map[(instance->map_id - 1) & 1];
+ memset(ci, 0, fusion->max_map_sz);
+
+ ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
+
+ ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
+
+ for (i = 0; i < num_lds; i++, ld_sync++) {
+ raid = MR_LdRaidGet(i, map);
+ ld_sync->targetId = MR_GetLDTgtId(i, map);
+ ld_sync->seqNum = raid->seqNum;
+ }
+
+ size_map_info = fusion->current_map_sz;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->mbox.b[0] = num_lds;
+ dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ instance->map_update_cmd = cmd;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ return ret;
+}
+
+/*
+ * megasas_display_intel_branding - Display branding string
+ * @instance: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+megasas_display_intel_branding(struct megasas_instance *instance)
+{
+ if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_INVADER:
+ switch (instance->pdev->subsystem_device) {
+ case MEGARAID_INTEL_RS3DC080_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3DC080_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3DC040_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3DC040_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3SC008_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3SC008_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3MC044_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3MC044_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ case PCI_DEVICE_ID_LSI_FURY:
+ switch (instance->pdev->subsystem_device) {
+ case MEGARAID_INTEL_RS3WC080_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3WC080_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3WC040_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3WC040_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * megasas_init_adapter_fusion - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+u32
+megasas_init_adapter_fusion(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *reg_set;
+ struct fusion_context *fusion;
+ u32 max_cmd;
+ int i = 0, count;
+
+ fusion = instance->ctrl_context;
+
+ reg_set = instance->reg_set;
+
+ /*
+ * Get various operational parameters from status register
+ */
+ instance->max_fw_cmds =
+ instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
+
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+ instance->max_fw_cmds = instance->max_fw_cmds-1;
+
+ /*
+ * Only the driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
+ */
+ instance->max_mfi_cmds =
+ MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
+
+ max_cmd = instance->max_fw_cmds;
+
+ fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
+
+ fusion->request_alloc_sz =
+ sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
+ fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
+ *(fusion->reply_q_depth);
+ fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
+ (max_cmd + 1)); /* Extra 1 for SMID 0 */
+
+ fusion->max_sge_in_main_msg =
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
+
+ fusion->max_sge_in_chain =
+ MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
+
+ instance->max_num_sge = rounddown_pow_of_two(
+ fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2);
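+
+ /*
+ * Illustration (offsets assumed): with 256-byte IO frames and the SGL
+ * beginning at byte 128 of the request, max_sge_in_main_msg works out
+ * to (256 - 128) / 16 = 8 IEEE SGEs in the main message; longer SG
+ * lists continue in a MEGASAS_MAX_SZ_CHAIN_FRAME chain frame.
+ */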
+
+ /* Used for pass thru MFI frame (DCMD) */
+ fusion->chain_offset_mfi_pthru =
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
+
+ fusion->chain_offset_io_request =
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ sizeof(union MPI2_SGE_IO_UNION))/16;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count; i++)
+ fusion->last_reply_idx[i] = 0;
+
+ /*
+ * Allocate memory for descriptors
+ * Create a pool of commands
+ */
+ if (megasas_alloc_cmds(instance))
+ goto fail_alloc_mfi_cmds;
+ if (megasas_alloc_cmds_fusion(instance))
+ goto fail_alloc_cmds;
+
+ if (megasas_ioc_init_fusion(instance))
+ goto fail_ioc_init;
+
+ megasas_display_intel_branding(instance);
+ if (megasas_get_ctrl_info(instance)) {
+ dev_err(&instance->pdev->dev,
+ "Could not get controller info. Fail from %s %d\n",
+ __func__, __LINE__);
+ goto fail_ioc_init;
+ }
+
+ instance->flag_ieee = 1;
+ fusion->fast_path_io = 0;
+
+ fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+ for (i = 0; i < 2; i++) {
+ fusion->ld_map[i] = NULL;
+ fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
+ fusion->drv_map_pages);
+ if (!fusion->ld_drv_map[i]) {
+ dev_err(&instance->pdev->dev, "Could not allocate "
+ "memory for local map info for %d pages\n",
+ fusion->drv_map_pages);
+ if (i == 1)
+ free_pages((ulong)fusion->ld_drv_map[0],
+ fusion->drv_map_pages);
+ goto fail_ioc_init;
+ }
+ memset(fusion->ld_drv_map[i], 0,
+ ((1 << PAGE_SHIFT) << fusion->drv_map_pages));
+ }
+
+ for (i = 0; i < 2; i++) {
+ fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
+ fusion->max_map_sz,
+ &fusion->ld_map_phys[i],
+ GFP_KERNEL);
+ if (!fusion->ld_map[i]) {
+ printk(KERN_ERR "megasas: Could not allocate memory "
+ "for map info\n");
+ goto fail_map_info;
+ }
+ }
+
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+
+ return 0;
+
+fail_map_info:
+ if (i == 1)
+ dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
+ fusion->ld_map[0], fusion->ld_map_phys[0]);
+fail_ioc_init:
+ megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+ megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
+ return 1;
+}
+
+/**
+ * megasas_fire_cmd_fusion - Sends command to the FW
+ * @instance : Adapter soft state
+ * @req_desc_lo : Lower 32 bits of the request descriptor
+ * @req_desc_hi : Upper 32 bits of the request descriptor
+ * @regs : MFI register set
+ */
+void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+ dma_addr_t req_desc_lo,
+ u32 req_desc_hi,
+ struct megasas_register_set __iomem *regs)
+{
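+ /*
+ * The 64-bit request descriptor must hit the queue port as a single
+ * unit: use one writeq() where available, otherwise hold hba_lock so
+ * the low/high 32-bit writes cannot interleave with another CPU's
+ * descriptor.
+ */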
+#if defined(writeq) && defined(CONFIG_64BIT)
+ u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) |
+ le32_to_cpu(req_desc_lo));
+
+ writeq(req_data, &(regs)->inbound_low_queue_port);
+#else
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+
+ writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+#endif
+}
+
+/**
+ * map_cmd_status - Maps FW cmd status to OS cmd status
+ * @cmd : Pointer to cmd
+ * @status : status of cmd returned by FW
+ * @ext_status : ext status of cmd returned by FW
+ */
+
+void
+map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
+{
+
+ switch (status) {
+
+ case MFI_STAT_OK:
+ cmd->scmd->result = DID_OK << 16;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ cmd->scmd->result = (DID_ERROR << 16) | ext_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ cmd->scmd->result = (DID_OK << 16) | ext_status;
+ if (ext_status == SAM_STAT_CHECK_CONDITION) {
+ memset(cmd->scmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ cmd->scmd->result |= DRIVER_SENSE << 24;
+ }
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ break;
+ case MFI_STAT_CONFIG_SEQ_MISMATCH:
+ cmd->scmd->result = DID_IMM_RETRY << 16;
+ break;
+ default:
+ printk(KERN_DEBUG "megasas: FW status %#x\n", status);
+ cmd->scmd->result = DID_ERROR << 16;
+ break;
+ }
+}
+
+/**
+ * megasas_make_sgl_fusion - Prepares IEEE SGL (64-bit SGEs)
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @sgl_ptr: SGL to be filled in
+ * @cmd: cmd we are working on
+ *
+ * If successful, this function returns the number of SG elements.
+ */
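+/*
+ * Chaining example (max_sge_in_main_msg = 8 assumed): for a 12-entry SG
+ * list, 7 SGEs are placed in the main message, the 8th slot is turned into
+ * a chain element pointing at cmd->sg_frame, and the remaining 5 SGEs are
+ * written into that chain frame.
+ */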
+static int
+megasas_make_sgl_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+ struct megasas_cmd_fusion *cmd)
+{
+ int i, sg_processed, sge_count;
+ struct scatterlist *os_sgl;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ sge_count = scsi_dma_map(scp);
+
+ BUG_ON(sge_count < 0);
+
+ if (sge_count > instance->max_num_sge || !sge_count)
+ return sge_count;
+
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+ sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
+ sgl_ptr->Flags = 0;
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (i == sge_count - 1)
+ sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+ }
+ sgl_ptr++;
+
+ sg_processed = i + 1;
+
+ if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
+ (sge_count > fusion->max_sge_in_main_msg)) {
+
+ struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ if ((le16_to_cpu(cmd->io_request->IoFlags) &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ cmd->io_request->ChainOffset =
+ fusion->
+ chain_offset_io_request;
+ else
+ cmd->io_request->ChainOffset = 0;
+ } else
+ cmd->io_request->ChainOffset =
+ fusion->chain_offset_io_request;
+
+ sg_chain = sgl_ptr;
+ /* Prepare chain element */
+ sg_chain->NextChainOffset = 0;
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY))
+ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+ else
+ sg_chain->Flags =
+ (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+ sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
+
+ sgl_ptr =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
+ memset(sgl_ptr, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
+ }
+ }
+
+ return sge_count;
+}
+
+/**
+ * megasas_set_pd_lba - Sets PD LBA
+ * @io_request: IO request frame whose CDB is rebuilt
+ * @cdb_len: CDB length
+ * @io_info: IO information (start block is io_info->pdBlock)
+ * @scp: SCSI command
+ * @local_map_ptr: RAID map
+ * @ref_tag: Primary reference tag for T10 PI
+ *
+ * Used to set the PD LBA in CDB for FP IOs
+ */
+void
+megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
+ struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+{
+ struct MR_LD_RAID *raid;
+ u32 ld;
+ u64 start_blk = io_info->pdBlock;
+ u8 *cdb = io_request->CDB.CDB32;
+ u32 num_blocks = io_info->numBlocks;
+ u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
+
+ /* Check if T10 PI (DIF) is enabled for this LD */
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+ if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
+ cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
+
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
+ else
+ cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
+ cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
+
+ /* LBA */
+ cdb[12] = (u8)((start_blk >> 56) & 0xff);
+ cdb[13] = (u8)((start_blk >> 48) & 0xff);
+ cdb[14] = (u8)((start_blk >> 40) & 0xff);
+ cdb[15] = (u8)((start_blk >> 32) & 0xff);
+ cdb[16] = (u8)((start_blk >> 24) & 0xff);
+ cdb[17] = (u8)((start_blk >> 16) & 0xff);
+ cdb[18] = (u8)((start_blk >> 8) & 0xff);
+ cdb[19] = (u8)(start_blk & 0xff);
+
+ /* Logical block reference tag */
+ io_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(ref_tag);
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
+ io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
+
+ /* Transfer length */
+ cdb[28] = (u8)((num_blocks >> 24) & 0xff);
+ cdb[29] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[30] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[31] = (u8)(num_blocks & 0xff);
+
+ /* set SCSI IO EEDPFlags */
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+ io_request->EEDPFlags = cpu_to_le16(
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
+ } else {
+ io_request->EEDPFlags = cpu_to_le16(
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
+ }
+ io_request->Control |= cpu_to_le32((0x4 << 26));
+ io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
+ } else {
+ /* Some drives don't support 16/12 byte CDB's, convert to 10 */
+ if (((cdb_len == 12) || (cdb_len == 16)) &&
+ (start_blk <= 0xffffffff)) {
+ if (cdb_len == 16) {
+ opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[14];
+ control = cdb[15];
+ } else {
+ opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[6] = groupnum;
+ cdb[9] = control;
+
+ /* Transfer length */
+ cdb[8] = (u8)(num_blocks & 0xff);
+ cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+
+ io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
+ cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode =
+ cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode =
+ cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u8)(num_blocks & 0xff);
+ cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
+ cdb_len = 16;
+ }
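+
+ /*
+ * Example (values assumed): a READ(10) addressing start_blk
+ * 0x100000000 cannot encode the LBA in 4 bytes, so the branch
+ * above rebuilds it as a READ(16) with the same flags, group
+ * number and control byte, and cdb_len becomes 16.
+ */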
+
+ /* Normal case, just load LBA here */
+ switch (cdb_len) {
+ case 6:
+ {
+ u8 val = cdb[1] & 0xE0;
+ cdb[3] = (u8)(start_blk & 0xff);
+ cdb[2] = (u8)((start_blk >> 8) & 0xff);
+ cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
+ break;
+ }
+ case 10:
+ cdb[5] = (u8)(start_blk & 0xff);
+ cdb[4] = (u8)((start_blk >> 8) & 0xff);
+ cdb[3] = (u8)((start_blk >> 16) & 0xff);
+ cdb[2] = (u8)((start_blk >> 24) & 0xff);
+ break;
+ case 12:
+ cdb[5] = (u8)(start_blk & 0xff);
+ cdb[4] = (u8)((start_blk >> 8) & 0xff);
+ cdb[3] = (u8)((start_blk >> 16) & 0xff);
+ cdb[2] = (u8)((start_blk >> 24) & 0xff);
+ break;
+ case 16:
+ cdb[9] = (u8)(start_blk & 0xff);
+ cdb[8] = (u8)((start_blk >> 8) & 0xff);
+ cdb[7] = (u8)((start_blk >> 16) & 0xff);
+ cdb[6] = (u8)((start_blk >> 24) & 0xff);
+ cdb[5] = (u8)((start_blk >> 32) & 0xff);
+ cdb[4] = (u8)((start_blk >> 40) & 0xff);
+ cdb[3] = (u8)((start_blk >> 48) & 0xff);
+ cdb[2] = (u8)((start_blk >> 56) & 0xff);
+ break;
+ }
+ }
+}
+
+/**
+ * megasas_build_ldio_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Prepares the io_request and chain elements (sg_frame) for IO
+ * The IO can be for PD (Fast Path) or LD
+ */
+void
+megasas_build_ldio_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct megasas_cmd_fusion *cmd)
+{
+ u8 fp_possible;
+ u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct IO_REQUEST_INFO io_info;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+ u8 *raidLUN;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+ fusion = instance->ctrl_context;
+
+ io_request = cmd->io_request;
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
+ io_request->RaidContext.status = 0;
+ io_request->RaidContext.exStatus = 0;
+
+ req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+ fp_possible = 0;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ datalength = (u32) scp->cmnd[4];
+ start_lba_lo = ((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ datalength = (u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8);
+ start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ datalength = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+ start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 16-byte READ(0x88) or WRITE(0x8A) cdb
+ */
+ else if (scp->cmd_len == 16) {
+ datalength = ((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+ start_lba_lo = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+ start_lba_hi = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
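+
+ /*
+ * For the 16-byte CDB the 64-bit LBA arrives split across cmnd[2..5]
+ * (upper half) and cmnd[6..9] (lower half); the two halves are
+ * recombined into io_info.ldStartBlock below.
+ */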
+
+ memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
+ io_info.numBlocks = datalength;
+ io_info.ldTgtId = device_id;
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
+
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ io_info.isRead = 1;
+
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+
+ if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
+ instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
+ io_request->RaidContext.regLockFlags = 0;
+ fp_possible = 0;
+ } else {
+ if (MR_BuildRaidContext(instance, &io_info,
+ &io_request->RaidContext,
+ local_map_ptr, &raidLUN))
+ fp_possible = io_info.fpOkForIo;
+ }
+
+ /*
+ * Use raw_smp_processor_id() for now until cmd->request->cpu is the
+ * CPU id by default, not the CPU group id; otherwise all MSI-X queues
+ * won't be utilized.
+ */
+ cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
+ raw_smp_processor_id() % instance->msix_vectors : 0;
+
+ if (fp_possible) {
+ megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
+ local_map_ptr, start_lba_lo);
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+ << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ }
+ if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
+ (io_info.isRead)) {
+ io_info.devHandle =
+ get_updated_dev_handle(instance,
+ &fusion->load_balance_info[device_id],
+ &io_info);
+ scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ cmd->pd_r1_lb = io_info.pd_after_lb;
+ } else
+ scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+ io_request->DevHandle = io_info.devHandle;
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raidLUN, 8);
+ } else {
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
+ << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ io_request->RaidContext.nseg = 0x1;
+ }
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = cpu_to_le16(device_id);
+ } /* Not FP */
+}
+
+/**
+ * megasas_build_dcdb_fusion - Prepares IOs to devices
+ * @instance:		Adapter soft state
+ * @scmd:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Prepares the io_request frame for non-read/write cmds
+ */
+static void
+megasas_build_dcdb_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd,
+ struct megasas_cmd_fusion *cmd)
+{
+ u32 device_id;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ u16 pd_index = 0;
+ u16 os_timeout_value;
+ u16 timeout_limit;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+ struct fusion_context *fusion = instance->ctrl_context;
+ u8 span, physArm;
+ u16 devHandle;
+ u32 ld, arRef, pd;
+ struct MR_LD_RAID *raid;
+ struct RAID_CONTEXT *pRAID_Context;
+
+ io_request = cmd->io_request;
+ device_id = MEGASAS_DEV_INDEX(instance, scmd);
+	pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+		scmd->device->id;
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
+ instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ io_request->RaidContext.RAIDFlags =
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->msix_vectors ?
+ raw_smp_processor_id() %
+ instance->msix_vectors :
+ 0;
+ os_timeout_value = scmd->request->timeout / HZ;
+
+ if (instance->secure_jbod_support &&
+ (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) {
+ /* system pd firmware path */
+ io_request->Function =
+ MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16(os_timeout_value);
+ } else {
+ /* system pd Fast Path */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+ timeout_limit = (scmd->device->type == TYPE_DISK) ?
+ 255 : 0xFFFF;
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16((os_timeout_value > timeout_limit) ?
+ timeout_limit : os_timeout_value);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ io_request->IoFlags |=
+ cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+ } else {
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+ goto NonFastPath;
+
+ /*
+ * For older firmware, Driver should not access ldTgtIdToLd
+ * beyond index 127 and for Extended VD firmware, ldTgtIdToLd
+ * should not go beyond 255.
+ */
+
+ if ((!fusion->fast_path_io) ||
+ (device_id >= instance->fw_supported_vd_count))
+ goto NonFastPath;
+
+ ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+
+ if (ld >= instance->fw_supported_vd_count)
+ goto NonFastPath;
+
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+
+ /* check if this LD is FP capable */
+ if (!(raid->capability.fpNonRWCapable))
+ /* not FP capable, send as non-FP */
+ goto NonFastPath;
+
+ /* get RAID_Context pointer */
+ pRAID_Context = &io_request->RaidContext;
+
+ /* set RAID context values */
+ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+ pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
+ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->regLockRowLBA = 0;
+ pRAID_Context->regLockLength = 0;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /* get the DevHandle for the PD (since this is
+ fpNonRWCapable, this is a single disk RAID0) */
+ span = physArm = 0;
+ arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+ pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+ devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+ /* build request descriptor */
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raid->LUN, 8);
+
+ /* build the raidScsiIO structure */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->DevHandle = devHandle;
+
+ return;
+
+NonFastPath:
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = cpu_to_le16(device_id);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
+ int_to_scsilun(scmd->device->lun, (struct scsi_lun *)io_request->LUN);
+}
+
+/**
+ * megasas_build_io_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Invokes helper functions to prepare request frames
+ * and sets flags appropriate for IO/Non-IO cmd
+ */
+int
+megasas_build_io_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct megasas_cmd_fusion *cmd)
+{
+ u32 device_id, sge_count;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+ /* Zero out some fields so they don't get reused */
+ memset(io_request->LUN, 0x0, 8);
+ io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
+ io_request->EEDPFlags = 0;
+ io_request->Control = 0;
+ io_request->EEDPBlockSize = 0;
+ io_request->ChainOffset = 0;
+ io_request->RaidContext.RAIDFlags = 0;
+ io_request->RaidContext.Type = 0;
+ io_request->RaidContext.nseg = 0;
+
+ memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
+	/*
+	 * Just the CDB length; the rest of the IoFlags are zero.
+	 * This will be modified for FP in build_ldio_fusion
+	 */
+ io_request->IoFlags = cpu_to_le16(scp->cmd_len);
+
+ if (megasas_cmd_type(scp) == READ_WRITE_LDIO)
+ megasas_build_ldio_fusion(instance, scp, cmd);
+ else
+ megasas_build_dcdb_fusion(instance, scp, cmd);
+
+ /*
+ * Construct SGL
+ */
+
+ sge_count =
+ megasas_make_sgl_fusion(instance, scp,
+ (struct MPI25_IEEE_SGE_CHAIN64 *)
+ &io_request->SGL, cmd);
+
+ if (sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
+ "max (0x%x) allowed\n", sge_count,
+ instance->max_num_sge);
+ return 1;
+ }
+
+ io_request->RaidContext.numSGE = sge_count;
+
+ io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
+
+ io_request->SGLOffset0 =
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
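+	/* SGLOffset0 is in units of 32-bit words; per the
+	 * MPI2_RAID_SCSI_IO_REQUEST layout in megaraid_sas_fusion.h the SGL
+	 * starts at byte offset 0x80, so this evaluates to 0x80 / 4 == 0x20.
+	 */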
+
+ io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+
+ cmd->scmd = scp;
+ scp->SCp.ptr = (char *)cmd;
+
+ return 0;
+}
+
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
+{
+ u8 *p;
+ struct fusion_context *fusion;
+
+ if (index >= instance->max_fw_cmds) {
+ printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
+ "descriptor for scsi%d\n", index,
+ instance->host->host_no);
+ return NULL;
+ }
+ fusion = instance->ctrl_context;
+	p = fusion->req_frames_desc +
+		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
+
+ return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
+}
+
+/**
+ * megasas_build_and_issue_cmd_fusion - Main routine for building and
+ *                                      issuing a non-IOCTL cmd
+ * @instance: Adapter soft state
+ * @scmd: pointer to scsi cmd from OS
+ */
+static u32
+megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd_fusion *cmd;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u32 index;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ cmd = megasas_get_cmd_fusion(instance);
+ if (!cmd)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ index = cmd->index;
+
+ req_desc = megasas_get_request_descriptor(instance, index-1);
+ if (!req_desc)
+ return 1;
+
+ req_desc->Words = 0;
+ cmd->request_desc = req_desc;
+
+ if (megasas_build_io_fusion(instance, scmd, cmd)) {
+ megasas_return_cmd_fusion(instance, cmd);
+ printk(KERN_ERR "megasas: Error building command.\n");
+ cmd->request_desc = NULL;
+ return 1;
+ }
+
+ req_desc = cmd->request_desc;
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
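+	/* SMIDs are 1-based while the request descriptor array is 0-based,
+	 * hence the index - 1 lookup above and the raw index stored in the
+	 * SMID field here.
+	 */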
+
+ if (cmd->io_request->ChainOffset != 0 &&
+ cmd->io_request->ChainOffset != 0xF)
+ printk(KERN_ERR "megasas: The chain offset value is not "
+ "correct : %x\n", cmd->io_request->ChainOffset);
+
+ /*
+ * Issue the command to the FW
+ */
+ atomic_inc(&instance->fw_outstanding);
+
+ instance->instancet->fire_cmd(instance,
+ req_desc->u.low, req_desc->u.high,
+ instance->reg_set);
+
+ return 0;
+}
+
+/**
+ * complete_cmd_fusion - Completes command
+ * @instance: Adapter soft state
+ * @MSIxIndex: MSI-X reply queue index
+ *
+ * Completes all commands that are in the reply descriptor queue
+ */
+int
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
+{
+ union MPI2_REPLY_DESCRIPTORS_UNION *desc;
+ struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ struct megasas_cmd_fusion *cmd_fusion;
+ u16 smid, num_completed;
+ u8 reply_descript_type;
+ u32 status, extStatus, device_id;
+ union desc_value d_val;
+ struct LD_LOAD_BALANCE_INFO *lbinfo;
+ int threshold_reply_count = 0;
+
+ fusion = instance->ctrl_context;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return IRQ_HANDLED;
+
+ desc = fusion->reply_frames_desc;
+ desc += ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
+ fusion->last_reply_idx[MSIxIndex];
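+	/* Each MSI-X vector owns a contiguous reply_alloc_sz-byte region of
+	 * the reply descriptor pool; resume from that region's base plus the
+	 * last reply index consumed for this vector.
+	 */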
+
+ reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ d_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ return IRQ_NONE;
+
+ num_completed = 0;
+
+ while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
+ smid = le16_to_cpu(reply_desc->SMID);
+
+ cmd_fusion = fusion->cmd_list[smid - 1];
+
+ scsi_io_req =
+ (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+
+ if (cmd_fusion->scmd)
+ cmd_fusion->scmd->SCp.ptr = NULL;
+
+ status = scsi_io_req->RaidContext.status;
+ extStatus = scsi_io_req->RaidContext.exStatus;
+
+ switch (scsi_io_req->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
+ /* Update load balancing info */
+ device_id = MEGASAS_DEV_INDEX(instance,
+ cmd_fusion->scmd);
+ lbinfo = &fusion->load_balance_info[device_id];
+ if (cmd_fusion->scmd->SCp.Status &
+ MEGASAS_LOAD_BALANCE_FLAG) {
+ atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
+ cmd_fusion->scmd->SCp.Status &=
+ ~MEGASAS_LOAD_BALANCE_FLAG;
+ }
+ if (reply_descript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ if (megasas_dbg_lvl == 5)
+ printk(KERN_ERR "\nmegasas: FAST Path "
+ "IO Success\n");
+ }
+ /* Fall thru and complete IO */
+ case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
+ /* Map the FW Cmd Status */
+ map_cmd_status(cmd_fusion, status, extStatus);
+ scsi_dma_unmap(cmd_fusion->scmd);
+ cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+ scsi_io_req->RaidContext.status = 0;
+ scsi_io_req->RaidContext.exStatus = 0;
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ atomic_dec(&instance->fw_outstanding);
+
+ break;
+ case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+ if (!cmd_mfi->mpt_pthr_cmd_blocked) {
+ if (megasas_dbg_lvl == 5)
+ dev_info(&instance->pdev->dev,
+ "freeing mfi/mpt pass-through "
+ "from %s %d\n",
+ __func__, __LINE__);
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
+ cmd_fusion);
+ }
+
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ cmd_fusion->flags = 0;
+ break;
+ }
+
+ fusion->last_reply_idx[MSIxIndex]++;
+ if (fusion->last_reply_idx[MSIxIndex] >=
+ fusion->reply_q_depth)
+ fusion->last_reply_idx[MSIxIndex] = 0;
+
+ desc->Words = ULLONG_MAX;
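+		/* Return the slot to its sentinel value; the loop condition
+		 * above (low/high words == UINT_MAX) and
+		 * megasas_reset_reply_desc() both rely on unused descriptors
+		 * reading as all ones.
+		 */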
+ num_completed++;
+ threshold_reply_count++;
+
+ /* Get the next reply descriptor */
+ if (!fusion->last_reply_idx[MSIxIndex])
+ desc = fusion->reply_frames_desc +
+ ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
+ else
+ desc++;
+
+ reply_desc =
+ (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ d_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+ /*
+ * Write to reply post host index register after completing threshold
+ * number of reply counts and still there are more replies in reply queue
+ * pending to be completed
+ */
+ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY))
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ threshold_reply_count = 0;
+ }
+ }
+
+ if (!num_completed)
+ return IRQ_NONE;
+
+ wmb();
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
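+	/* The reply post host index register encodes the MSI-x index in
+	 * bits 31:24 and the last-consumed reply index in the low bits;
+	 * Invader/Fury provide one such register per group of 8 vectors,
+	 * hence the (MSIxIndex & 0x7) / (MSIxIndex / 8) split above.
+	 */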
+ megasas_check_and_restore_queue_depth(instance);
+ return IRQ_HANDLED;
+}
+
+/**
+ * megasas_complete_cmd_dpc_fusion - Completes command
+ * @instance_addr: Address of the adapter soft state (cast to unsigned long)
+ *
+ * Tasklet to complete cmds
+ */
+void
+megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+ unsigned long flags;
+ u32 count, MSIxIndex;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+	/* If we have already declared adapter dead, do not complete cmds */
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
+ complete_cmd_fusion(instance, MSIxIndex);
+}
+
+/**
+ * megasas_isr_fusion - isr entry point
+ * @irq: IRQ number
+ * @devp: IRQ context (struct megasas_irq_context *)
+ */
+irqreturn_t megasas_isr_fusion(int irq, void *devp)
+{
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
+ u32 mfiStatus, fw_state, dma_state;
+
+ if (instance->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!instance->msix_vectors) {
+ mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+ if (!mfiStatus)
+ return IRQ_NONE;
+ }
+
+ /* If we are resetting, bail */
+ if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
+ instance->instancet->clear_intr(instance->reg_set);
+ return IRQ_HANDLED;
+ }
+
+ if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
+ instance->instancet->clear_intr(instance->reg_set);
+ /* If we didn't complete any commands, check for FW fault */
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ dma_state = instance->instancet->read_fw_status_reg
+ (instance->reg_set) & MFI_STATE_DMADONE;
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support) {
+ /* Start collecting crash, if DMA bit is done */
+ if ((fw_state == MFI_STATE_FAULT) && dma_state)
+ schedule_work(&instance->crash_init);
+ else if (fw_state == MFI_STATE_FAULT)
+ schedule_work(&instance->work_init);
+ } else if (fw_state == MFI_STATE_FAULT) {
+ printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
+ "for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * build_mpt_mfi_pass_thru - builds a cmd for MFI pass-through
+ * @instance: Adapter soft state
+ * @mfi_cmd: megasas_cmd pointer
+ *
+ */
+u8
+build_mpt_mfi_pass_thru(struct megasas_instance *instance,
+ struct megasas_cmd *mfi_cmd)
+{
+ struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
+ struct megasas_cmd_fusion *cmd;
+ struct fusion_context *fusion;
+ struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+ u32 opcode;
+
+ cmd = megasas_get_cmd_fusion(instance);
+ if (!cmd)
+ return 1;
+
+ /* Save the smid. To be used for returning the cmd */
+ mfi_cmd->context.smid = cmd->index;
+ cmd->sync_cmd_idx = mfi_cmd->index;
+
+ /* Set this only for Blocked commands */
+ opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
+ mfi_cmd->is_wait_event = 1;
+
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ mfi_cmd->is_wait_event = 1;
+
+ if (mfi_cmd->is_wait_event)
+ mfi_cmd->mpt_pthr_cmd_blocked = cmd;
+
+ /*
+ * For cmds where the flag is set, store the flag and check
+ * on completion. For cmds with this flag, don't call
+ * megasas_complete_cmd
+ */
+
+ if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
+ cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ fusion = instance->ctrl_context;
+ io_req = cmd->io_request;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ mpi25_ieee_chain =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+ io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+ io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
+ SGL) / 4;
+ io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
+
+ mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
+
+ mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+ mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
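+	/* The legacy MFI frame itself is not copied into the MPT request;
+	 * a single IEEE chain element instead points the firmware at the
+	 * frame's DMA address, sized at the maximum chain frame length.
+	 */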
+
+ return 0;
+}
+
+/**
+ * build_mpt_cmd - Calls helper function to build an MFI pass-through cmd
+ * @instance: Adapter soft state
+ * @cmd: mfi cmd to build
+ *
+ */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u16 index;
+
+ if (build_mpt_mfi_pass_thru(instance, cmd)) {
+ printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
+ return NULL;
+ }
+
+ index = cmd->context.smid;
+
+ req_desc = megasas_get_request_descriptor(instance, index - 1);
+
+ if (!req_desc)
+ return NULL;
+
+ req_desc->Words = 0;
+ req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
+
+ return req_desc;
+}
+
+/**
+ * megasas_issue_dcmd_fusion - Issues an MFI pass-through cmd
+ * @instance: Adapter soft state
+ * @cmd: mfi cmd pointer
+ *
+ */
+void
+megasas_issue_dcmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ req_desc = build_mpt_cmd(instance, cmd);
+ if (!req_desc) {
+ printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
+ return;
+ }
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
+ instance->instancet->fire_cmd(instance, req_desc->u.low,
+ req_desc->u.high, instance->reg_set);
+}
+
+/**
+ * megasas_release_fusion - Reverses the FW initialization
+ * @instance: Adapter soft state
+ */
+void
+megasas_release_fusion(struct megasas_instance *instance)
+{
+ megasas_free_cmds(instance);
+ megasas_free_cmds_fusion(instance);
+
+ iounmap(instance->reg_set);
+
+ pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_read_fw_status_reg_fusion - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_alloc_host_crash_buffer - Allocate host buffers for FW crash dump collection
+ * @instance: Controller's soft instance
+ *
+ * The number of buffers successfully allocated is recorded in
+ * instance->drv_buf_alloc.
+ */
+static void
+megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
+{
+ unsigned int i;
+
+ instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
+ for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
+ instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
+ instance->crash_buf_pages);
+ if (!instance->crash_buf[i]) {
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "memory allocation failed at index %d\n", i);
+ break;
+ }
+ memset(instance->crash_buf[i], 0,
+ ((1 << PAGE_SHIFT) << instance->crash_buf_pages));
+ }
+ instance->drv_buf_alloc = i;
+}
+
+/**
+ * megasas_free_host_crash_buffer - Free host buffers used for FW crash dump collection
+ * @instance: Controller's soft instance
+ */
+void
+megasas_free_host_crash_buffer(struct megasas_instance *instance)
+{
+	unsigned int i;
+
+ for (i = 0; i < instance->drv_buf_alloc; i++) {
+ if (instance->crash_buf[i])
+ free_pages((ulong)instance->crash_buf[i],
+ instance->crash_buf_pages);
+ }
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ instance->fw_crash_buffer_size = 0;
+}
+
+/**
+ * megasas_adp_reset_fusion - For controller reset
+ * @regs: MFI register set
+ */
+static int
+megasas_adp_reset_fusion(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ return 0;
+}
+
+/**
+ * megasas_check_reset_fusion - For controller reset check
+ * @instance: Adapter soft state
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_fusion(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ return 0;
+}
+
+/* This function waits for outstanding commands on fusion to complete */
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ int iotimeout, int *convert)
+{
+ int i, outstanding, retval = 0, hb_seconds_missed = 0;
+ u32 fw_state;
+
+ for (i = 0; i < resetwaittime; i++) {
+ /* Check if firmware is in fault state */
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ printk(KERN_WARNING "megasas: Found FW in FAULT state,"
+ " will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ /* If SR-IOV VF mode & heartbeat timeout, don't wait */
+ if (instance->requestorId && !iotimeout) {
+ retval = 1;
+ goto out;
+ }
+
+ /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+ if (instance->requestorId && iotimeout) {
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ hb_seconds_missed = 0;
+ } else {
+ hb_seconds_missed++;
+ if (hb_seconds_missed ==
+ (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ " Heartbeat never completed "
+ " while polling during I/O "
+ " timeout handling for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ *convert = 1;
+ retval = 1;
+ goto out;
+ }
+ }
+ }
+
+ outstanding = atomic_read(&instance->fw_outstanding);
+ if (!outstanding)
+ goto out;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ "commands to complete for scsi%d\n", i,
+ outstanding, instance->host->host_no);
+ megasas_complete_cmd_dpc_fusion(
+ (unsigned long)instance);
+ }
+ msleep(1000);
+ }
+
+ if (atomic_read(&instance->fw_outstanding)) {
+ printk("megaraid_sas: pending commands remain after waiting, "
+ "will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ }
+out:
+ return retval;
+}
+
+void megasas_reset_reply_desc(struct megasas_instance *instance)
+{
+ int i, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+
+ fusion = instance->ctrl_context;
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count ; i++)
+ fusion->last_reply_idx[i] = 0;
+ reply_desc = fusion->reply_frames_desc;
+ for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
+}
+
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ int i, j, retval = (DID_RESET << 16);
+
+ if (instance->mpio && instance->requestorId) {
+ for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
+ for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
+ if (megasas_mgmt_info.instance[i] &&
+ (megasas_mgmt_info.instance[i] != instance) &&
+ megasas_mgmt_info.instance[i]->mpio &&
+ megasas_mgmt_info.instance[i]->requestorId
+ &&
+ (megasas_mgmt_info.instance[i]->ld_ids[j]
+ == scmd->device->id)) {
+ retval = (DID_NO_CONNECT << 16);
+ goto out;
+ }
+ }
+out:
+ return retval;
+}
+
+/* Core fusion reset function */
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+{
+ int retval = SUCCESS, i, j, retry = 0, convert = 0;
+ struct megasas_instance *instance;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u32 host_diag, abs_state, status_reg, reset_adapter;
+ u32 io_timeout_in_crash_mode = 0;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+ fusion = instance->ctrl_context;
+
+ mutex_lock(&instance->reset_mutex);
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
+ "returning FAILED for scsi%d.\n",
+ instance->host->host_no);
+ mutex_unlock(&instance->reset_mutex);
+ return FAILED;
+ }
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+
+ /* IO timeout detected, forcibly put FW in FAULT state */
+ if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
+ instance->crash_dump_app_support && iotimeout) {
+ dev_info(&instance->pdev->dev, "IO timeout is detected, "
+ "forcibly FAULT Firmware\n");
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ status_reg = readl(&instance->reg_set->doorbell);
+ writel(status_reg | MFI_STATE_FORCE_OCR,
+ &instance->reg_set->doorbell);
+ readl(&instance->reg_set->doorbell);
+ mutex_unlock(&instance->reset_mutex);
+ do {
+ ssleep(3);
+ io_timeout_in_crash_mode++;
+ dev_dbg(&instance->pdev->dev, "waiting for [%d] "
+ "seconds for crash dump collection and OCR "
+ "to be done\n", (io_timeout_in_crash_mode * 3));
+ } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ (io_timeout_in_crash_mode < 80));
+
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "OCR done for IO "
+ "timeout case\n");
+ retval = SUCCESS;
+ } else {
+ dev_info(&instance->pdev->dev, "Controller is not "
+ "operational after 240 seconds wait for IO "
+ "timeout case in FW crash dump mode\n do "
+ "OCR/kill adapter\n");
+ retval = megasas_reset_fusion(shost, 0);
+ }
+ return retval;
+ }
+
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+ set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+
+ /* First try waiting for commands to complete */
+ if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+ &convert)) {
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ printk(KERN_WARNING "megaraid_sas: resetting fusion "
+ "adapter scsi%d.\n", instance->host->host_no);
+ if (convert)
+ iotimeout = 0;
+
+ /* Now return commands back to the OS */
+ for (i = 0 ; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd) {
+ scsi_dma_unmap(cmd_fusion->scmd);
+ cmd_fusion->scmd->result =
+ megasas_check_mpio_paths(instance,
+ cmd_fusion->scmd);
+ cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ atomic_dec(&instance->fw_outstanding);
+ }
+ }
+
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (instance->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
+ /* Reset not supported, kill adapter */
+ printk(KERN_WARNING "megaraid_sas: Reset not supported"
+ ", killing adapter scsi%d.\n",
+ instance->host->host_no);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ retval = FAILED;
+ goto out;
+ }
+
+ /* Let SR-IOV VF & PF sync up if there was a HB failure */
+ if (instance->requestorId && !iotimeout) {
+ msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+ /* Look for a late HB update after VF settle time */
+ if (abs_state == MFI_STATE_OPERATIONAL &&
+ (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter)) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ "Late FW heartbeat update for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ } else {
+ /* In VF mode, first poll for FW ready */
+ for (i = 0;
+ i < (MEGASAS_RESET_WAIT_TIME * 1000);
+ i += 20) {
+ status_reg =
+ instance->instancet->
+ read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg &
+ MFI_STATE_MASK;
+ if (abs_state == MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas"
+ ": SR-IOV: FW was found"
+ "to be in ready state "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ break;
+ }
+ msleep(20);
+ }
+ if (abs_state != MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "FW not in ready state after %d"
+ " seconds for scsi%d, status_reg = "
+ "0x%x.\n",
+ MEGASAS_RESET_WAIT_TIME,
+ instance->host->host_no,
+ status_reg);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery =
+ MEGASAS_HW_CRITICAL_ERROR;
+ retval = FAILED;
+ goto out;
+ }
+ }
+ }
+
+ /* Now try to reset the chip */
+ for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+
+ /* Check that the diag write enable (DRWE) bit is on */
+ host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
+ while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+ msleep(100);
+ host_diag =
+ readl(&instance->reg_set->fusion_host_diag);
+ if (retry++ == 100) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "Host diag unlock failed! "
+ "for scsi%d\n",
+ instance->host->host_no);
+ break;
+ }
+ }
+ if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+ continue;
+
+ /* Send chip reset command */
+ writel(host_diag | HOST_DIAG_RESET_ADAPTER,
+ &instance->reg_set->fusion_host_diag);
+ msleep(3000);
+
+ /* Make sure reset adapter bit is cleared */
+ host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
+ while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+ msleep(100);
+ host_diag =
+ readl(&instance->reg_set->fusion_host_diag);
+ if (retry++ == 1000) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "Diag reset adapter never "
+ "cleared for scsi%d!\n",
+ instance->host->host_no);
+ break;
+ }
+ }
+ if (host_diag & HOST_DIAG_RESET_ADAPTER)
+ continue;
+
+ abs_state =
+ instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ retry = 0;
+
+ while ((abs_state <= MFI_STATE_FW_INIT) &&
+ (retry++ < 1000)) {
+ msleep(100);
+ abs_state =
+ instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ }
+ if (abs_state <= MFI_STATE_FW_INIT) {
+ printk(KERN_WARNING "megaraid_sas: firmware "
+ "state < MFI_STATE_FW_INIT, state = "
+ "0x%x for scsi%d\n", abs_state,
+ instance->host->host_no);
+ continue;
+ }
+
+ /* Wait for FW to become ready */
+ if (megasas_transition_to_ready(instance, 1)) {
+ printk(KERN_WARNING "megaraid_sas: Failed to "
+ "transition controller to ready "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ continue;
+ }
+
+ megasas_reset_reply_desc(instance);
+ if (megasas_ioc_init_fusion(instance)) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "megasas_ioc_init_fusion() failed!"
+ " for scsi%d\n",
+ instance->host->host_no);
+ continue;
+ }
+
+ /* Re-fire management commands */
+ for (j = 0 ; j < instance->max_fw_cmds; j++) {
+ cmd_fusion = fusion->cmd_list[j];
+ if (cmd_fusion->sync_cmd_idx !=
+ (u32)ULONG_MAX) {
+ cmd_mfi =
+ instance->
+ cmd_list[cmd_fusion->sync_cmd_idx];
+ if (cmd_mfi->frame->dcmd.opcode ==
+ cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
+ } else {
+ req_desc =
+ megasas_get_request_descriptor(
+ instance,
+ cmd_mfi->context.smid
+ -1);
+ if (!req_desc) {
+ printk(KERN_WARNING
+ "req_desc NULL"
+ " for scsi%d\n",
+ instance->host->host_no);
+ /* Return leaked MPT
+ frame */
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ } else {
+ instance->instancet->
+ fire_cmd(instance,
+ req_desc->
+ u.low,
+ req_desc->
+ u.high,
+ instance->
+ reg_set);
+ }
+ }
+ }
+ }
+
+ if (megasas_get_ctrl_info(instance)) {
+ dev_info(&instance->pdev->dev,
+ "Failed from %s %d\n",
+ __func__, __LINE__);
+ megaraid_sas_kill_hba(instance);
+ retval = FAILED;
+ }
+ /* Reset load balance info */
+ memset(fusion->load_balance_info, 0,
+			       sizeof(struct LD_LOAD_BALANCE_INFO) *
+			       MAX_LOGICAL_DRIVES_EXT);
+
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+
+ clear_bit(MEGASAS_FUSION_IN_RESET,
+ &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
+ /* Restart SR-IOV heartbeat */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ /* Adapter reset completed successfully */
+ printk(KERN_WARNING "megaraid_sas: Reset "
+ "successful for scsi%d.\n",
+ instance->host->host_no);
+
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support)
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_ON);
+ else
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+
+ retval = SUCCESS;
+ goto out;
+ }
+ /* Reset failed, kill the adapter */
+ printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
+ "adapter scsi%d.\n", instance->host->host_no);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ retval = FAILED;
+ } else {
+ /* For VF: Restart HB timer if we didn't OCR */
+ if (instance->requestorId) {
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ }
+ clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ }
+out:
+ clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ mutex_unlock(&instance->reset_mutex);
+ return retval;
+}
+
+/* Fusion Crash dump collection work queue */
+void megasas_fusion_crash_dump_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, crash_init);
+ u32 status_reg;
+ u8 partial_copy = 0;
+
+
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+
+	/*
+	 * Allocate host crash buffers into which the data from the 1 MB DMA
+	 * crash buffer is copied
+	 */
+ if (instance->drv_buf_index == 0) {
+ /* Buffer is already allocated for old Crash dump.
+ * Do OCR and do not wait for crash dump collection
+ */
+ if (instance->drv_buf_alloc) {
+ dev_info(&instance->pdev->dev, "earlier crash dump is "
+ "not yet copied by application, ignoring this "
+ "crash dump and initiating OCR\n");
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ writel(status_reg,
+ &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ return;
+ }
+ megasas_alloc_host_crash_buffer(instance);
+ dev_info(&instance->pdev->dev, "Number of host crash buffers "
+ "allocated: %d\n", instance->drv_buf_alloc);
+ }
+
+	/*
+	 * If the driver has already filled all the host buffers it could
+	 * allocate and FW has more crash dump data, the remaining data
+	 * is ignored.
+	 */
+ if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
+ dev_info(&instance->pdev->dev, "Driver is done copying "
+ "the buffer: %d\n", instance->drv_buf_alloc);
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ partial_copy = 1;
+ } else {
+ memcpy(instance->crash_buf[instance->drv_buf_index],
+ instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+ instance->drv_buf_index++;
+ status_reg &= ~MFI_STATE_DMADONE;
+ }
+
+ if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
+ dev_info(&instance->pdev->dev, "Crash Dump is available,number "
+ "of copied buffers: %d\n", instance->drv_buf_index);
+ instance->fw_crash_buffer_size = instance->drv_buf_index;
+ instance->fw_crash_state = AVAILABLE;
+ instance->drv_buf_index = 0;
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ if (!partial_copy)
+ megasas_reset_fusion(instance->host, 0);
+ } else {
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ }
+}
+
+
+/* Fusion OCR work queue */
+void megasas_fusion_ocr_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, work_init);
+
+ megasas_reset_fusion(instance->host, 0);
+}
+
+struct megasas_instance_template megasas_instance_template_fusion = {
+ .fire_cmd = megasas_fire_cmd_fusion,
+ .enable_intr = megasas_enable_intr_fusion,
+ .disable_intr = megasas_disable_intr_fusion,
+ .clear_intr = megasas_clear_intr_fusion,
+ .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
+ .adp_reset = megasas_adp_reset_fusion,
+ .check_reset = megasas_check_reset_fusion,
+ .service_isr = megasas_isr_fusion,
+ .tasklet = megasas_complete_cmd_dpc_fusion,
+ .init_adapter = megasas_init_adapter_fusion,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
+ .issue_dcmd = megasas_issue_dcmd_fusion,
+};
diff --git a/kernel/drivers/scsi/megaraid/megaraid_sas_fusion.h b/kernel/drivers/scsi/megaraid/megaraid_sas_fusion.h
new file mode 100644
index 000000000..56e6db2d5
--- /dev/null
+++ b/kernel/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -0,0 +1,850 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2013 LSI Corporation
+ * Copyright (c) 2013-2014 Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * FILE: megaraid_sas_fusion.h
+ *
+ * Authors: Avago Technologies
+ * Manoj Jose
+ * Sumant Patro
+ * Kashyap Desai <kashyap.desai@avagotech.com>
+ * Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ * San Jose, California 95131
+ */
+
+#ifndef _MEGARAID_SAS_FUSION_H_
+#define _MEGARAID_SAS_FUSION_H_
+
+/* Fusion defines */
+#define MEGASAS_MAX_SZ_CHAIN_FRAME 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
+#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
+#define MEGASAS_LOAD_BALANCE_FLAG 0x1
+#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
+#define HOST_DIAG_WRITE_ENABLE 0x80
+#define HOST_DIAG_RESET_ADAPTER 0x4
+#define MEGASAS_FUSION_MAX_RESET_TRIES 3
+#define MAX_MSIX_QUEUES_FUSION 128
+
+/* Invader defines */
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+
+/* T10 PI defines */
+#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
+#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
+#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
+#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
+#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
+#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
+#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
+
+#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+
+/*
+ * Raid context flags
+ */
+
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
+enum MR_RAID_FLAGS_IO_SUB_TYPE {
+ MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+};
+
+/*
+ * Request descriptor types
+ */
+#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
+#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
+
+#define MEGASAS_FP_CMD_LEN 16
+#define MEGASAS_FUSION_IN_RESET 0
+#define THRESHOLD_REPLY_COUNT 50
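+/* complete_cmd_fusion() rings the reply post host index register after this
+ * many completions when more replies are still pending in the reply queue,
+ * bounding how long consumed descriptors sit before being made reusable.
+ */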
+
+/*
+ * Raid Context structure which describes MegaRAID specific IO Parameters
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ */
+
+struct RAID_CONTEXT {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 nseg:4;
+ u8 Type:4;
+#else
+ u8 Type:4;
+ u8 nseg:4;
+#endif
+ u8 resvd0;
+ u16 timeoutValue;
+ u8 regLockFlags;
+ u8 resvd1;
+ u16 VirtualDiskTgtId;
+ u64 regLockRowLBA;
+ u32 regLockLength;
+ u16 nextLMId;
+ u8 exStatus;
+ u8 status;
+ u8 RAIDFlags;
+ u8 numSGE;
+ u16 configSeqNum;
+ u8 spanArm;
+ u8 resvd2[3];
+};
+
+#define RAID_CTX_SPANARM_ARM_SHIFT (0)
+#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
+
+#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
+#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
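+/* spanArm in struct RAID_CONTEXT packs the span number into bits [7:5] and
+ * the arm number into bits [4:0]; e.g. span 2, arm 3 encodes as
+ * (2 << 5) | 3 == 0x43.
+ */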
+
+/*
+ * define region lock types
+ */
+enum REGION_TYPE {
+ REGION_TYPE_UNUSED = 0,
+ REGION_TYPE_SHARED_READ = 1,
+ REGION_TYPE_SHARED_WRITE = 2,
+ REGION_TYPE_EXCLUSIVE = 3,
+};
+
+/* MPI2 defines */
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
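+/* e.g. MPI2_VERSION == (0x02 << 8) | 0x00 == 0x0200 */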
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+ MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+struct MPI25_IEEE_SGE_CHAIN64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 NextChainOffset;
+ u8 Flags;
+};
+
+struct MPI2_SGE_SIMPLE_UNION {
+ u32 FlagsLength;
+ union {
+ u32 Address32;
+ u64 Address64;
+ } u;
+};
+
+struct MPI2_SCSI_IO_CDB_EEDP32 {
+ u8 CDB[20]; /* 0x00 */
+ u32 PrimaryReferenceTag; /* 0x14 */
+ u16 PrimaryApplicationTag; /* 0x18 */
+ u16 PrimaryApplicationTagMask; /* 0x1A */
+ u32 TransferLength; /* 0x1C */
+};
+
+struct MPI2_SGE_CHAIN_UNION {
+ u16 Length;
+ u8 NextChainOffset;
+ u8 Flags;
+ union {
+ u32 Address32;
+ u64 Address64;
+ } u;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE32 {
+ u32 Address;
+ u32 FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_CHAIN32 {
+ u32 Address;
+ u32 FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 Reserved2;
+ u8 Flags;
+};
+
+struct MPI2_IEEE_SGE_CHAIN64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 Reserved2;
+ u8 Flags;
+};
+
+union MPI2_IEEE_SGE_SIMPLE_UNION {
+ struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
+};
+
+union MPI2_IEEE_SGE_CHAIN_UNION {
+ struct MPI2_IEEE_SGE_CHAIN32 Chain32;
+ struct MPI2_IEEE_SGE_CHAIN64 Chain64;
+};
+
+union MPI2_SGE_IO_UNION {
+ struct MPI2_SGE_SIMPLE_UNION MpiSimple;
+ struct MPI2_SGE_CHAIN_UNION MpiChain;
+ union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+};
+
+union MPI2_SCSI_IO_CDB_UNION {
+ u8 CDB32[32];
+ struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ struct MPI2_SGE_SIMPLE_UNION SGE;
+};
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+struct MPI2_RAID_SCSI_IO_REQUEST {
+ u16 DevHandle; /* 0x00 */
+ u8 ChainOffset; /* 0x02 */
+ u8 Function; /* 0x03 */
+ u16 Reserved1; /* 0x04 */
+ u8 Reserved2; /* 0x06 */
+ u8 MsgFlags; /* 0x07 */
+ u8 VP_ID; /* 0x08 */
+ u8 VF_ID; /* 0x09 */
+ u16 Reserved3; /* 0x0A */
+ u32 SenseBufferLowAddress; /* 0x0C */
+ u16 SGLFlags; /* 0x10 */
+ u8 SenseBufferLength; /* 0x12 */
+ u8 Reserved4; /* 0x13 */
+ u8 SGLOffset0; /* 0x14 */
+ u8 SGLOffset1; /* 0x15 */
+ u8 SGLOffset2; /* 0x16 */
+ u8 SGLOffset3; /* 0x17 */
+ u32 SkipCount; /* 0x18 */
+ u32 DataLength; /* 0x1C */
+ u32 BidirectionalDataLength; /* 0x20 */
+ u16 IoFlags; /* 0x24 */
+ u16 EEDPFlags; /* 0x26 */
+ u32 EEDPBlockSize; /* 0x28 */
+ u32 SecondaryReferenceTag; /* 0x2C */
+ u16 SecondaryApplicationTag; /* 0x30 */
+ u16 ApplicationTagTranslationMask; /* 0x32 */
+ u8 LUN[8]; /* 0x34 */
+ u32 Control; /* 0x3C */
+ union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+ struct RAID_CONTEXT RaidContext; /* 0x60 */
+ union MPI2_SGE_IO_UNION SGL; /* 0x80 */
+};
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+ u32 RequestFlags:8;
+ u32 MessageAddress1:24;
+ u32 MessageAddress2;
+};
+
+/* Default Request Descriptor */
+struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 DescriptorTypeDependent; /* 0x06 */
+};
+
+/* High Priority Request Descriptor */
+struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 Reserved1; /* 0x06 */
+};
+
+/* SCSI IO Request Descriptor */
+struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 DevHandle; /* 0x06 */
+};
+
+/* SCSI Target Request Descriptor */
+struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* RAID Accelerator Request Descriptor */
+struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 Reserved; /* 0x06 */
+};
+
+/* union of Request Descriptors */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION {
+ struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+ u64 Words;
+ };
+};
+
+/* Default Reply Descriptor */
+struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 DescriptorTypeDependent1; /* 0x02 */
+ u32 DescriptorTypeDependent2; /* 0x04 */
+};
+
+/* Address Reply Descriptor */
+struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u32 ReplyFrameAddress; /* 0x04 */
+};
+
+/* SCSI IO Success Reply Descriptor */
+struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 TaskTag; /* 0x04 */
+ u16 Reserved1; /* 0x06 */
+};
+
+/* TargetAssist Success Reply Descriptor */
+struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u8 SequenceNumber; /* 0x04 */
+ u8 Reserved1; /* 0x05 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* Target Command Buffer Reply Descriptor */
+struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u8 VP_ID; /* 0x02 */
+ u8 Flags; /* 0x03 */
+ u16 InitiatorDevHandle; /* 0x04 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* RAID Accelerator Success Reply Descriptor */
+struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u32 Reserved; /* 0x04 */
+};
+
+/* union of Reply Descriptors */
+union MPI2_REPLY_DESCRIPTORS_UNION {
+ struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+ RAIDAcceleratorSuccess;
+ u64 Words;
+};
+
+/* IOCInit Request message */
+struct MPI2_IOC_INIT_REQUEST {
+ u8 WhoInit; /* 0x00 */
+ u8 Reserved1; /* 0x01 */
+ u8 ChainOffset; /* 0x02 */
+ u8 Function; /* 0x03 */
+ u16 Reserved2; /* 0x04 */
+ u8 Reserved3; /* 0x06 */
+ u8 MsgFlags; /* 0x07 */
+ u8 VP_ID; /* 0x08 */
+ u8 VF_ID; /* 0x09 */
+ u16 Reserved4; /* 0x0A */
+ u16 MsgVersion; /* 0x0C */
+ u16 HeaderVersion; /* 0x0E */
+ u32 Reserved5; /* 0x10 */
+ u16 Reserved6; /* 0x14 */
+ u8 Reserved7; /* 0x16 */
+ u8 HostMSIxVectors; /* 0x17 */
+ u16 Reserved8; /* 0x18 */
+ u16 SystemRequestFrameSize; /* 0x1A */
+ u16 ReplyDescriptorPostQueueDepth; /* 0x1C */
+ u16 ReplyFreeQueueDepth; /* 0x1E */
+ u32 SenseBufferAddressHigh; /* 0x20 */
+ u32 SystemReplyAddressHigh; /* 0x24 */
+ u64 SystemRequestFrameBaseAddress; /* 0x28 */
+ u64 ReplyDescriptorPostQueueAddress;/* 0x30 */
+ u64 ReplyFreeQueueAddress; /* 0x38 */
+ u64 TimeStamp; /* 0x40 */
+};
+
+/* mrpriv defines */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_ARRAYS_EXT 256
+#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
+
+struct MR_DEV_HANDLE_INFO {
+ u16 curDevHdl;
+ u8 validHandles;
+ u8 reserved;
+ u16 devHandle[2];
+};
+
+struct MR_ARRAY_INFO {
+ u16 pd[MAX_RAIDMAP_ROW_SIZE];
+};
+
+struct MR_QUAD_ELEMENT {
+ u64 logStart;
+ u64 logEnd;
+ u64 offsetInSpan;
+ u32 diff;
+ u32 reserved1;
+};
+
+struct MR_SPAN_INFO {
+ u32 noElements;
+ u32 reserved1;
+ struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_LD_SPAN {
+ u64 startBlk;
+ u64 numBlks;
+ u16 arrayRef;
+ u8 spanRowSize;
+ u8 spanRowDataSize;
+ u8 reserved[4];
+};
+
+struct MR_SPAN_BLOCK_INFO {
+ u64 num_rows;
+ struct MR_LD_SPAN span;
+ struct MR_SPAN_INFO block_span_info;
+};
+
+struct MR_LD_RAID {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved4:7;
+ u32 fpNonRWCapable:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteCapable:1;
+ u32 encryptionType:8;
+ u32 pdPiMode:4;
+ u32 ldPiMode:4;
+ u32 reserved5:3;
+ u32 fpCapable:1;
+#else
+ u32 fpCapable:1;
+ u32 reserved5:3;
+ u32 ldPiMode:4;
+ u32 pdPiMode:4;
+ u32 encryptionType:8;
+ u32 fpWriteCapable:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpNonRWCapable:1;
+ u32 reserved4:7;
+#endif
+ } capability;
+ u32 reserved6;
+ u64 size;
+ u8 spanDepth;
+ u8 level;
+ u8 stripeShift;
+ u8 rowSize;
+ u8 rowDataSize;
+ u8 writeMode;
+ u8 PRL;
+ u8 SRL;
+ u16 targetId;
+ u8 ldState;
+ u8 regTypeReqOnWrite;
+ u8 modFactor;
+ u8 regTypeReqOnRead;
+ u16 seqNum;
+
+ struct {
+ u32 ldSyncRequired:1;
+ u32 reserved:31;
+ } flags;
+
+ u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+ u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+ u8 reserved3[0x80-0x2D]; /* 0x2D */
+};
+
+struct MR_LD_SPAN_MAP {
+ struct MR_LD_RAID ldRaid;
+ u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
+ struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_FW_RAID_MAP {
+ u32 totalSize;
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ u32 ldCount;
+ u32 Reserved1;
+ u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
+ MAX_RAIDMAP_VIEWS];
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+ struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ struct MR_LD_SPAN_MAP ldSpanMap[1];
+};
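+/* ldSpanMap[1] above is a variable-length trailing array; the effective
+ * element count is implied by totalSize. MR_FW_RAID_MAP_ALL below reserves
+ * room for the full MAX_LOGICAL_DRIVES worth of entries.
+ */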
+
+struct IO_REQUEST_INFO {
+ u64 ldStartBlock;
+ u32 numBlocks;
+ u16 ldTgtId;
+ u8 isRead;
+ u16 devHandle;
+ u64 pdBlock;
+ u8 fpOkForIo;
+ u8 IoforUnevenSpan;
+ u8 start_span;
+ u8 reserved;
+ u64 start_row;
+ u8 span_arm; /* span[7:5], arm[4:0] */
+ u8 pd_after_lb;
+};
+
+struct MR_LD_TARGET_SYNC {
+ u8 targetId;
+ u8 reserved;
+ u16 seqNum;
+};
+
+#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+struct megasas_register_set;
+struct megasas_instance;
+
+union desc_word {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
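+
+/*
+ * Usage sketch (illustrative only): a 64-bit request descriptor is posted to
+ * the controller as two 32-bit halves, which is what the low/high view of
+ * this union provides:
+ *
+ *	union desc_word d;
+ *	d.word = req_desc->Words;
+ *	writel(d.u.low, low_queue_port);
+ *	writel(d.u.high, high_queue_port);
+ *
+ * (req_desc, low_queue_port and high_queue_port are placeholders here.)
+ */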
+
+struct megasas_cmd_fusion {
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ dma_addr_t io_request_phys_addr;
+
+ union MPI2_SGE_IO_UNION *sg_frame;
+ dma_addr_t sg_frame_phys_addr;
+
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
+ struct list_head list;
+ struct scsi_cmnd *scmd;
+ struct megasas_instance *instance;
+
+ u8 retry_for_fw_reset;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;
+
+	/*
+	 * Context for an MFI frame.
+	 * Used to look up the MFI cmd in the list when an MFI cmd completes.
+	 */
+ u32 sync_cmd_idx;
+ u32 index;
+ u8 flags;
+ u8 pd_r1_lb;
+};
+
+struct LD_LOAD_BALANCE_INFO {
+ u8 loadBalanceFlag;
+ u8 reserved1;
+ atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
+ u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
+};
+
+/* SPAN_SET is information calculated from the span info in the RAID map, per LD */
+typedef struct _LD_SPAN_SET {
+ u64 log_start_lba;
+ u64 log_end_lba;
+ u64 span_row_start;
+ u64 span_row_end;
+ u64 data_strip_start;
+ u64 data_strip_end;
+ u64 data_row_start;
+ u64 data_row_end;
+ u8 strip_offset[MAX_SPAN_DEPTH];
+ u32 span_row_data_width;
+ u32 diff;
+ u32 reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+ LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
+
+struct MR_FW_RAID_MAP_ALL {
+ struct MR_FW_RAID_MAP raidMap;
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} __packed;
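+
+/*
+ * Sizing note (illustrative): since MR_FW_RAID_MAP ends in a one-element
+ * ldSpanMap[] and MR_FW_RAID_MAP_ALL appends the remaining
+ * MAX_LOGICAL_DRIVES - 1 entries, the size of a map covering ld_count
+ * logical drives can be computed along the lines of:
+ *
+ *	sizeof(struct MR_FW_RAID_MAP) +
+ *		(sizeof(struct MR_LD_SPAN_MAP) * (ld_count - 1));
+ */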
+
+struct MR_DRV_RAID_MAP {
+	/* Total size of this structure, including this field.
+	 * This field is manipulated by the driver for the extended RAID map;
+	 * otherwise the value is taken from the firmware RAID map.
+	 */
+ u32 totalSize;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+	/* timeout value used by the driver for FP I/Os */
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[1];
+
+};
+
+/* The driver RAID map is the same size as the extended RAID map.
+ * MR_DRV_RAID_MAP_ALL exists to stay in sync with the old RAID map layout,
+ * mainly for code reuse.
+ */
+struct MR_DRV_RAID_MAP_ALL {
+
+ struct MR_DRV_RAID_MAP raidMap;
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+} __packed;
+
+struct MR_FW_RAID_MAP_EXT {
+	/* Not used in the new map */
+ u32 reserved;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+};
+
+struct fusion_context {
+ struct megasas_cmd_fusion **cmd_list;
+ struct list_head cmd_pool;
+
+ spinlock_t mpt_pool_lock;
+
+ dma_addr_t req_frames_desc_phys;
+ u8 *req_frames_desc;
+
+ struct dma_pool *io_request_frames_pool;
+ dma_addr_t io_request_frames_phys;
+ u8 *io_request_frames;
+
+ struct dma_pool *sg_dma_pool;
+ struct dma_pool *sense_dma_pool;
+
+ dma_addr_t reply_frames_desc_phys;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
+ struct dma_pool *reply_frames_desc_pool;
+
+ u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
+
+ u32 reply_q_depth;
+ u32 request_alloc_sz;
+ u32 reply_alloc_sz;
+ u32 io_frames_alloc_sz;
+
+ u16 max_sge_in_main_msg;
+ u16 max_sge_in_chain;
+
+ u8 chain_offset_io_request;
+ u8 chain_offset_mfi_pthru;
+
+ struct MR_FW_RAID_MAP_ALL *ld_map[2];
+ dma_addr_t ld_map_phys[2];
+
+	/* Non-DMA-able memory; the driver's local copy. */
+ struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
+
+ u32 max_map_sz;
+ u32 current_map_sz;
+ u32 drv_map_sz;
+ u32 drv_map_pages;
+ u8 fast_path_io;
+ struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
+};
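+
+/*
+ * Usage note (illustrative): ld_map[2]/ld_drv_map[2] are double-buffered so
+ * a new map can be fetched from firmware while the current one keeps serving
+ * fast-path I/O; the driver flips between the two copies with a map
+ * generation counter, along the lines of ld_drv_map[map_id & 1] (map_id
+ * being a counter kept outside this structure).
+ */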
+
+union desc_value {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+#endif /* _MEGARAID_SAS_FUSION_H_ */