From e09b41010ba33a20a87472ee821fa407a5b8da36 Mon Sep 17 00:00:00 2001
From: José Pekkarinen
Date: Mon, 11 Apr 2016 10:41:07 +0300
Subject: These changes are the raw update to linux-4.4.6-rt14. Kernel sources
 are taken from kernel.org, and rt patch from the rt wiki download page.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

During the rebasing, the following patch collided:

Force tick interrupt and get rid of softirq magic (I70131fb85).

Collisions have been removed because its logic was already found in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen
---
 kernel/drivers/scsi/qla2xxx/Kconfig | 4 +-
 kernel/drivers/scsi/qla2xxx/qla_attr.c | 26 +-
 kernel/drivers/scsi/qla2xxx/qla_bsg.c | 7 +-
 kernel/drivers/scsi/qla2xxx/qla_dbg.c | 108 ++--
 kernel/drivers/scsi/qla2xxx/qla_def.h | 35 +-
 kernel/drivers/scsi/qla2xxx/qla_gs.c | 52 +-
 kernel/drivers/scsi/qla2xxx/qla_init.c | 231 ++++----
 kernel/drivers/scsi/qla2xxx/qla_iocb.c | 143 ++---
 kernel/drivers/scsi/qla2xxx/qla_isr.c | 78 ++-
 kernel/drivers/scsi/qla2xxx/qla_mbx.c | 87 +--
 kernel/drivers/scsi/qla2xxx/qla_mid.c | 7 +-
 kernel/drivers/scsi/qla2xxx/qla_mr.c | 22 +-
 kernel/drivers/scsi/qla2xxx/qla_nx.c | 168 +++---
 kernel/drivers/scsi/qla2xxx/qla_nx2.c | 33 +-
 kernel/drivers/scsi/qla2xxx/qla_nx2.h | 6 +-
 kernel/drivers/scsi/qla2xxx/qla_os.c | 67 ++-
 kernel/drivers/scsi/qla2xxx/qla_sup.c | 16 +-
 kernel/drivers/scsi/qla2xxx/qla_target.c | 918 +++++++++++++++++++++++++-----
 kernel/drivers/scsi/qla2xxx/qla_target.h | 73 ++-
 kernel/drivers/scsi/qla2xxx/qla_tmpl.c | 43 +-
 kernel/drivers/scsi/qla2xxx/qla_version.h | 2 +-
 kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.c | 428 +++-----
 kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.h | 6 +-
 23 files changed, 1525 insertions(+), 1035 deletions(-)

(limited to 'kernel/drivers/scsi/qla2xxx')

diff --git a/kernel/drivers/scsi/qla2xxx/Kconfig b/kernel/drivers/scsi/qla2xxx/Kconfig
index 33f60c92e..a0f732b13 100644
--- a/kernel/drivers/scsi/qla2xxx/Kconfig
+++ b/kernel/drivers/scsi/qla2xxx/Kconfig
@@ -32,10 +32,10 @@ config SCSI_QLA_FC
 	They are also included in the linux-firmware tree as well.
config TCM_QLA2XXX - tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" + tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs" depends on SCSI_QLA_FC && TARGET_CORE depends on LIBFC select BTREE default n ---help--- - Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs + Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs diff --git a/kernel/drivers/scsi/qla2xxx/qla_attr.c b/kernel/drivers/scsi/qla2xxx/qla_attr.c index 82b92c414..6b942d9e5 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_attr.c +++ b/kernel/drivers/scsi/qla2xxx/qla_attr.c @@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); - if (IS_QLA83XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); @@ -884,7 +884,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; int rval; - uint16_t actual_size; if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) return 0; @@ -901,7 +900,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, } do_read: - actual_size = 0; memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, @@ -1079,8 +1077,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - return scnprintf(buf, PAGE_SIZE, "%s\n", - vha->hw->model_desc ? vha->hw->model_desc : ""); + return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc); } static ssize_t @@ -1348,7 +1345,8 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) && + !IS_QLA27XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", @@ -1537,6 +1535,20 @@ qla2x00_allow_cna_fw_dump_store(struct device *dev, return strlen(buf); } +static ssize_t +qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", + ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -1581,6 +1593,7 @@ static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, qla2x00_allow_cna_fw_dump_show, qla2x00_allow_cna_fw_dump_store); +static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1614,6 +1627,7 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_diag_megabytes, &dev_attr_fw_dump_size, &dev_attr_allow_cna_fw_dump, + &dev_attr_pep_version, NULL, }; diff --git a/kernel/drivers/scsi/qla2xxx/qla_bsg.c b/kernel/drivers/scsi/qla2xxx/qla_bsg.c index 
2e2bb6f45..c26acde79 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_bsg.c +++ b/kernel/drivers/scsi/qla2xxx/qla_bsg.c @@ -405,7 +405,7 @@ done: return rval; } -inline uint16_t +static inline uint16_t qla24xx_calc_ct_iocbs(uint16_t dsds) { uint16_t iocbs; @@ -1733,7 +1733,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job) struct Scsi_Host *host = bsg_job->shost; scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - uint16_t thread_id; uint32_t rval = EXT_STATUS_OK; uint16_t req_sg_cnt = 0; uint16_t rsp_sg_cnt = 0; @@ -1790,8 +1789,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job) goto done; } - thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; - mutex_lock(&ha->selflogin_lock); if (vha->self_login_loop_id == 0) { /* Initialize all required fields of fcport */ @@ -2174,7 +2171,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job) { int ret = -EINVAL; struct fc_rport *rport; - fc_port_t *fcport = NULL; struct Scsi_Host *host; scsi_qla_host_t *vha; @@ -2183,7 +2179,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job) if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { rport = bsg_job->rport; - fcport = *(fc_port_t **) rport->dd_data; host = rport_to_shost(rport); vha = shost_priv(host); } else { diff --git a/kernel/drivers/scsi/qla2xxx/qla_dbg.c b/kernel/drivers/scsi/qla2xxx/qla_dbg.c index e9ae6b924..34dc9a356 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_dbg.c +++ b/kernel/drivers/scsi/qla2xxx/qla_dbg.c @@ -19,14 +19,14 @@ * | Device Discovery | 0x2016 | 0x2020-0x2022, | * | | | 0x2011-0x2012, | * | | | 0x2099-0x20a4 | - * | Queue Command and IO tracing | 0x3059 | 0x300b | + * | Queue Command and IO tracing | 0x3075 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | * | | | 0x302d,0x3033 | * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | - * | Async Events | 0x5087 | 0x502b-0x502f | + * | Async Events | 0x508a | 0x502b-0x502f | * | | | 0x5047 | * | | | 0x5084,0x5075 | * | | | 0x503d,0x5044 | @@ -67,10 +67,10 @@ * | | | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | - * | Target Mode | 0xe079 | | - * | Target Mode Management | 0xf080 | 0xf002 | + * | Target Mode | 0xe080 | | + * | Target Mode Management | 0xf096 | 0xf002 | * | | | 0xf046-0xf049 | - * | Target Mode Task Management | 0x1000b | | + * | Target Mode Task Management | 0x1000d | | * ---------------------------------------------------------------------- */ @@ -117,7 +117,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, { int rval; uint32_t cnt, stat, timer, dwords, idx; - uint16_t mb0, mb1; + uint16_t mb0; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; uint32_t *dump = (uint32_t *)ha->gid_list; @@ -161,7 +161,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, &ha->mbx_cmd_flags); mb0 = RD_REG_WORD(®->mailbox0); - mb1 = RD_REG_WORD(®->mailbox1); + RD_REG_WORD(®->mailbox1); WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); @@ -486,7 +486,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) return ptr; *last_chain = &fcec->type; - fcec->type = __constant_htonl(DUMP_CHAIN_FCE); + fcec->type = htonl(DUMP_CHAIN_FCE); fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + fce_calc_size(ha->fce_bufs)); fcec->size = htonl(fce_calc_size(ha->fce_bufs)); @@ -527,7 +527,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, /* aqp = ha->atio_q_map[que]; */ q = ptr; *last_chain = &q->type; - 
q->type = __constant_htonl(DUMP_CHAIN_QUEUE); + q->type = htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + @@ -536,7 +536,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, /* Add header. */ qh = ptr; - qh->queue = __constant_htonl(TYPE_ATIO_QUEUE); + qh->queue = htonl(TYPE_ATIO_QUEUE); qh->number = htonl(que); qh->size = htonl(aqp->length * sizeof(request_t)); ptr += sizeof(struct qla2xxx_mqueue_header); @@ -571,7 +571,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) /* Add chain. */ q = ptr; *last_chain = &q->type; - q->type = __constant_htonl(DUMP_CHAIN_QUEUE); + q->type = htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + @@ -580,7 +580,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) /* Add header. */ qh = ptr; - qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE); + qh->queue = htonl(TYPE_REQUEST_QUEUE); qh->number = htonl(que); qh->size = htonl(req->length * sizeof(request_t)); ptr += sizeof(struct qla2xxx_mqueue_header); @@ -599,7 +599,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) /* Add chain. */ q = ptr; *last_chain = &q->type; - q->type = __constant_htonl(DUMP_CHAIN_QUEUE); + q->type = htonl(DUMP_CHAIN_QUEUE); q->chain_size = htonl( sizeof(struct qla2xxx_mqueue_chain) + sizeof(struct qla2xxx_mqueue_header) + @@ -608,7 +608,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) /* Add header. */ qh = ptr; - qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE); + qh->queue = htonl(TYPE_RESPONSE_QUEUE); qh->number = htonl(que); qh->size = htonl(rsp->length * sizeof(response_t)); ptr += sizeof(struct qla2xxx_mqueue_header); @@ -627,15 +627,15 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) uint32_t cnt, que_idx; uint8_t que_cnt; struct qla2xxx_mq_chain *mq = ptr; - device_reg_t __iomem *reg; + device_reg_t *reg; if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) return ptr; mq = ptr; *last_chain = &mq->type; - mq->type = __constant_htonl(DUMP_CHAIN_MQ); - mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); + mq->type = htonl(DUMP_CHAIN_MQ); + mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain)); que_cnt = ha->max_req_queues > ha->max_rsp_queues ? 
ha->max_req_queues : ha->max_rsp_queues; @@ -695,8 +695,10 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd002, @@ -832,8 +834,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) qla2xxx_dump_post_process(base_vha, rval); qla2300_fw_dump_failed: +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } /** @@ -859,8 +865,10 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) mb0 = mb2 = 0; flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd004, @@ -1030,8 +1038,12 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) qla2xxx_dump_post_process(base_vha, rval); qla2100_fw_dump_failed: +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void @@ -1039,7 +1051,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; - uint32_t risc_address; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t __iomem *dmp_reg; @@ -1047,7 +1058,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) uint16_t __iomem *mbx_reg; unsigned long flags; struct qla24xx_fw_dump *fw; - uint32_t ext_mem_cnt; void *nxt; void *nxt_chain; uint32_t *last_chain = NULL; @@ -1056,12 +1066,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) if (IS_P3P_TYPE(ha)) return; - risc_address = ext_mem_cnt = 0; flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd006, @@ -1274,8 +1285,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt_chain = (void *)ha->fw_dump + ha->chain_offset; nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { - ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); - *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. 
*/ @@ -1285,8 +1296,12 @@ qla24xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla24xx_fw_dump_failed: +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void @@ -1294,7 +1309,6 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; - uint32_t risc_address; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t __iomem *dmp_reg; @@ -1302,17 +1316,17 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) uint16_t __iomem *mbx_reg; unsigned long flags; struct qla25xx_fw_dump *fw; - uint32_t ext_mem_cnt; void *nxt, *nxt_chain; uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - risc_address = ext_mem_cnt = 0; flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd008, @@ -1329,7 +1343,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) } fw = &ha->fw_dump->isp.isp25; qla2xxx_prep_dump(ha, ha->fw_dump); - ha->fw_dump->version = __constant_htonl(2); + ha->fw_dump->version = htonl(2); fw->host_status = htonl(RD_REG_DWORD(®->host_status)); @@ -1593,8 +1607,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { - ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); - *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. */ @@ -1604,8 +1618,12 @@ qla25xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla25xx_fw_dump_failed: +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void @@ -1613,7 +1631,6 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; - uint32_t risc_address; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t __iomem *dmp_reg; @@ -1621,17 +1638,17 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) uint16_t __iomem *mbx_reg; unsigned long flags; struct qla81xx_fw_dump *fw; - uint32_t ext_mem_cnt; void *nxt, *nxt_chain; uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - risc_address = ext_mem_cnt = 0; flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd00a, @@ -1914,8 +1931,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { - ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); - *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. 
*/ @@ -1925,16 +1942,19 @@ qla81xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla81xx_fw_dump_failed: +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; - uint32_t cnt, reg_data; - uint32_t risc_address; + uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t __iomem *dmp_reg; @@ -1942,17 +1962,17 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) uint16_t __iomem *mbx_reg; unsigned long flags; struct qla83xx_fw_dump *fw; - uint32_t ext_mem_cnt; void *nxt, *nxt_chain; uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - risc_address = ext_mem_cnt = 0; flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd00c, @@ -1979,16 +1999,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) WRT_REG_DWORD(®->iobase_addr, 0x6000); dmp_reg = ®->iobase_window; - reg_data = RD_REG_DWORD(dmp_reg); + RD_REG_DWORD(dmp_reg); WRT_REG_DWORD(dmp_reg, 0); dmp_reg = ®->unused_4_1[0]; - reg_data = RD_REG_DWORD(dmp_reg); + RD_REG_DWORD(dmp_reg); WRT_REG_DWORD(dmp_reg, 0); WRT_REG_DWORD(®->iobase_addr, 0x6010); dmp_reg = ®->unused_4_1[2]; - reg_data = RD_REG_DWORD(dmp_reg); + RD_REG_DWORD(dmp_reg); WRT_REG_DWORD(dmp_reg, 0); /* select PCR and disable ecc checking and correction */ @@ -2420,8 +2440,8 @@ copy_queue: nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain); if (last_chain) { - ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); - *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); + ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT); + *last_chain |= htonl(DUMP_CHAIN_LAST); } /* Adjust valid length. 
*/ @@ -2431,8 +2451,12 @@ qla83xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); qla83xx_fw_dump_failed: +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } /****************************************************************************/ diff --git a/kernel/drivers/scsi/qla2xxx/qla_def.h b/kernel/drivers/scsi/qla2xxx/qla_def.h index e86201d3b..388d79088 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_def.h +++ b/kernel/drivers/scsi/qla2xxx/qla_def.h @@ -274,6 +274,7 @@ #define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ struct req_que; +struct qla_tgt_sess; /* * (sd.h is not exported, hence local inclusion) @@ -2026,6 +2027,7 @@ typedef struct fc_port { uint16_t port_id; unsigned long retry_delay_timestamp; + struct qla_tgt_sess *tgt_session; } fc_port_t; #include "qla_mr.h" @@ -3059,6 +3061,7 @@ struct qla_hw_data { #define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 #define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071 #define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 +#define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261 uint32_t device_type; #define DT_ISP2100 BIT_0 @@ -3082,7 +3085,8 @@ struct qla_hw_data { #define DT_ISP8044 BIT_18 #define DT_ISP2071 BIT_19 #define DT_ISP2271 BIT_20 -#define DT_ISP_LAST (DT_ISP2271 << 1) +#define DT_ISP2261 BIT_21 +#define DT_ISP_LAST (DT_ISP2261 << 1) #define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 @@ -3114,6 +3118,7 @@ struct qla_hw_data { #define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) #define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271) +#define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261) #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ IS_QLA6312(ha) || IS_QLA6322(ha)) @@ -3122,7 +3127,7 @@ struct qla_hw_data { #define IS_QLA25XX(ha) (IS_QLA2532(ha)) #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) #define IS_QLA84XX(ha) (IS_QLA8432(ha)) -#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha)) +#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha)) #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ IS_QLA84XX(ha)) #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ @@ -3154,16 +3159,17 @@ struct qla_hw_data { /* Bit 21 of fw_attributes decides the MCTP capabilities */ #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ ((ha)->fw_attributes_ext[0] & BIT_0)) -#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) -#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) +#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) -#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) +#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) -#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) +#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) #define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) #define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) /* HBA serial number */ uint8_t serial0; @@ -3286,6 +3292,7 @@ struct qla_hw_data { uint8_t mpi_version[3]; uint32_t mpi_capabilities; uint8_t phy_version[3]; + uint8_t pep_version[3]; /* Firmware dump template 
*/ void *fw_dump_template; @@ -3418,9 +3425,9 @@ struct qla_hw_data { mempool_t *ctx_mempool; #define FCP_CMND_DMA_POOL_SIZE 512 - unsigned long nx_pcibase; /* Base I/O address */ - uint8_t *nxdb_rd_ptr; /* Doorbell read pointer */ - unsigned long nxdb_wr_ptr; /* Door bell write pointer */ + void __iomem *nx_pcibase; /* Base I/O address */ + void __iomem *nxdb_rd_ptr; /* Doorbell read pointer */ + void __iomem *nxdb_wr_ptr; /* Door bell write pointer */ uint32_t crb_win; uint32_t curr_window; @@ -3579,6 +3586,16 @@ typedef struct scsi_qla_host { uint16_t fcoe_fcf_idx; uint8_t fcoe_vn_port_mac[6]; + /* list of commands waiting on workqueue */ + struct list_head qla_cmd_list; + struct list_head qla_sess_op_cmd_list; + spinlock_t cmd_list_lock; + + /* Counter to detect races between ELS and RSCN events */ + atomic_t generation_tick; + /* Time when global fcport update has been scheduled */ + int total_fcport_update_gen; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ diff --git a/kernel/drivers/scsi/qla2xxx/qla_gs.c b/kernel/drivers/scsi/qla2xxx/qla_gs.c index dccc4dcc3..94e8a8592 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_gs.c +++ b/kernel/drivers/scsi/qla2xxx/qla_gs.c @@ -35,10 +35,10 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ms_pkt->entry_type = MS_IOCB_TYPE; ms_pkt->entry_count = 1; SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); - ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); + ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); - ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); + ms_pkt->cmd_dsd_count = cpu_to_le16(1); + ms_pkt->total_dsd_count = cpu_to_le16(2); ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); ms_pkt->req_bytecount = cpu_to_le32(req_size); @@ -74,10 +74,10 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ct_pkt->entry_type = CT_IOCB_TYPE; ct_pkt->entry_count = 1; - ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); + ct_pkt->nport_handle = cpu_to_le16(NPH_SNS); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); - ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); + ct_pkt->cmd_dsd_count = cpu_to_le16(1); + ct_pkt->rsp_dsd_count = cpu_to_le16(1); ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); ct_pkt->cmd_byte_count = cpu_to_le32(req_size); @@ -142,7 +142,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, case CS_DATA_UNDERRUN: case CS_DATA_OVERRUN: /* Overrun? 
*/ if (ct_rsp->header.response != - __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { + cpu_to_be16(CT_ACCEPT_RESPONSE)) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n", routine, vha->d_id.b.domain, @@ -1153,10 +1153,10 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, ms_pkt->entry_type = MS_IOCB_TYPE; ms_pkt->entry_count = 1; SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id); - ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); + ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG); ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); - ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); + ms_pkt->cmd_dsd_count = cpu_to_le16(1); + ms_pkt->total_dsd_count = cpu_to_le16(2); ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); ms_pkt->req_bytecount = cpu_to_le32(req_size); @@ -1193,8 +1193,8 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, ct_pkt->entry_count = 1; ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); - ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); + ct_pkt->cmd_dsd_count = cpu_to_le16(1); + ct_pkt->rsp_dsd_count = cpu_to_le16(1); ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); ct_pkt->cmd_byte_count = cpu_to_le32(req_size); @@ -1281,19 +1281,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Prepare FDMI command arguments -- attribute block, attributes. */ memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE); - ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1); + ct_req->req.rhba.entry_count = cpu_to_be32(1); memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE); size = 2 * WWN_SIZE + 4 + 4; /* Attributes */ ct_req->req.rhba.attrs.count = - __constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT); + cpu_to_be32(FDMI_HBA_ATTR_COUNT); entries = ct_req->req.rhba.hba_identifier; /* Nodename. */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME); - eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE); + eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME); + eiter->len = cpu_to_be16(4 + WWN_SIZE); memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); size += 4 + WWN_SIZE; @@ -1302,7 +1302,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Manufacturer. */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER); + eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER); alen = strlen(QLA2XXX_MANUFACTURER); snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer), "%s", "QLogic Corporation"); @@ -1315,7 +1315,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Serial number. */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); + eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); if (IS_FWI2_CAPABLE(ha)) qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num, sizeof(eiter->a.serial_num)); @@ -1335,7 +1335,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Model name. */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL); + eiter->type = cpu_to_be16(FDMI_HBA_MODEL); snprintf(eiter->a.model, sizeof(eiter->a.model), "%s", ha->model_number); alen = strlen(eiter->a.model); @@ -1348,7 +1348,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Model description. 
*/ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); + eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc), "%s", ha->model_desc); alen = strlen(eiter->a.model_desc); @@ -1361,7 +1361,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Hardware version. */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); + eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); if (!IS_FWI2_CAPABLE(ha)) { snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version), "HW:%s", ha->adapter_id); @@ -1385,7 +1385,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Driver version. */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION); + eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION); snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version), "%s", qla2x00_version_str); alen = strlen(eiter->a.driver_version); @@ -1398,7 +1398,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Option ROM version. */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); + eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version), "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); alen = strlen(eiter->a.orom_version); @@ -1411,7 +1411,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Firmware version */ eiter = entries + size; - eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); + eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, sizeof(eiter->a.fw_version)); alen = strlen(eiter->a.fw_version); @@ -2484,8 +2484,8 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size, ct_pkt->entry_count = 1; ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); - ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); - ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); + ct_pkt->cmd_dsd_count = cpu_to_le16(1); + ct_pkt->rsp_dsd_count = cpu_to_le16(1); ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); ct_pkt->cmd_byte_count = cpu_to_le32(req_size); diff --git a/kernel/drivers/scsi/qla2xxx/qla_init.c b/kernel/drivers/scsi/qla2xxx/qla_init.c index 998498e23..e197c6f39 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_init.c +++ b/kernel/drivers/scsi/qla2xxx/qla_init.c @@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data) QLA_LOGIO_LOGIN_RETRIED : 0; qla2x00_post_async_login_done_work(fcport->vha, fcport, lio->u.logio.data); + } else if (sp->type == SRB_LOGOUT_CMD) { + qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); } } @@ -497,7 +499,10 @@ void qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { - qla2x00_mark_device_lost(vha, fcport, 1, 0); + /* Don't re-login in target mode */ + if (!fcport->tgt_session) + qla2x00_mark_device_lost(vha, fcport, 1, 0); + qlt_logo_completion_handler(fcport, data[0]); return; } @@ -708,7 +713,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00d4, "Unable to initialize ISP84XX.\n"); - qla84xx_put_chip(vha); + qla84xx_put_chip(vha); } } @@ -1127,7 +1132,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - uint32_t cnt, d2; + uint32_t cnt; uint16_t wd; static int abts_cnt; /* ISP abort retry counts 
*/ int rval = QLA_SUCCESS; @@ -1159,7 +1164,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) udelay(100); /* Wait for firmware to complete NVRAM accesses. */ - d2 = (uint32_t) RD_REG_WORD(®->mailbox0); + RD_REG_WORD(®->mailbox0); for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); @@ -1178,7 +1183,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) RD_REG_DWORD(®->mailbox0)); /* Wait for soft-reset to complete. */ - d2 = RD_REG_DWORD(®->ctrl_status); + RD_REG_DWORD(®->ctrl_status); for (cnt = 0; cnt < 6000000; cnt++) { barrier(); if ((RD_REG_DWORD(®->ctrl_status) & @@ -1221,7 +1226,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); RD_REG_DWORD(®->hccr); - d2 = (uint32_t) RD_REG_WORD(®->mailbox0); + RD_REG_WORD(®->mailbox0); for (cnt = 6000000; RD_REG_WORD(®->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); @@ -1272,16 +1277,19 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data) static void qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) { - struct qla_hw_data *ha = vha->hw; uint32_t wd32 = 0; uint delta_msec = 100; uint elapsed_msec = 0; uint timeout_msec; ulong n; - if (!IS_QLA25XX(ha) && !IS_QLA2031(ha)) + if (vha->hw->pdev->subsystem_device != 0x0175 && + vha->hw->pdev->subsystem_device != 0x0240) return; + WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); + udelay(100); + attempt: timeout_msec = TIMEOUT_SEMAPHORE; n = timeout_msec / delta_msec; @@ -1538,7 +1546,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { - if (IS_QLA83XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); @@ -1550,7 +1558,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues. @@ -1685,7 +1693,7 @@ allocate: ha->fw_dump->signature[1] = 'L'; ha->fw_dump->signature[2] = 'G'; ha->fw_dump->signature[3] = 'C'; - ha->fw_dump->version = __constant_htonl(1); + ha->fw_dump->version = htonl(1); ha->fw_dump->fixed_size = htonl(fixed_size); ha->fw_dump->mem_size = htonl(mem_size); @@ -2065,8 +2073,8 @@ qla2x00_config_rings(struct scsi_qla_host *vha) struct rsp_que *rsp = ha->rsp_q_map[0]; /* Setup ring parameters in initialization control block. */ - ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); - ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); + ha->init_cb->request_q_outpointer = cpu_to_le16(0); + ha->init_cb->response_q_inpointer = cpu_to_le16(0); ha->init_cb->request_q_length = cpu_to_le16(req->length); ha->init_cb->response_q_length = cpu_to_le16(rsp->length); ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); @@ -2085,7 +2093,7 @@ void qla24xx_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; - device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0); + device_reg_t *reg = ISP_QUE_REG(ha, 0); struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; struct qla_msix_entry *msix; struct init_cb_24xx *icb; @@ -2095,8 +2103,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha) /* Setup ring parameters in initialization control block. 
*/ icb = (struct init_cb_24xx *)ha->init_cb; - icb->request_q_outpointer = __constant_cpu_to_le16(0); - icb->response_q_inpointer = __constant_cpu_to_le16(0); + icb->request_q_outpointer = cpu_to_le16(0); + icb->response_q_inpointer = cpu_to_le16(0); icb->request_q_length = cpu_to_le16(req->length); icb->response_q_length = cpu_to_le16(rsp->length); icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); @@ -2105,18 +2113,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha) icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); /* Setup ATIO queue dma pointers for target mode */ - icb->atio_q_inpointer = __constant_cpu_to_le16(0); + icb->atio_q_inpointer = cpu_to_le16(0); icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); if (IS_SHADOW_REG_CAPABLE(ha)) - icb->firmware_options_2 |= - __constant_cpu_to_le32(BIT_30|BIT_29); + icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29); if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { - icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); - icb->rid = __constant_cpu_to_le16(rid); + icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS); + icb->rid = cpu_to_le16(rid); if (ha->flags.msix_enabled) { msix = &ha->msix_entries[1]; ql_dbg(ql_dbg_init, vha, 0x00fd, @@ -2126,26 +2133,22 @@ qla24xx_config_rings(struct scsi_qla_host *vha) } /* Use alternate PCI bus number */ if (MSB(rid)) - icb->firmware_options_2 |= - __constant_cpu_to_le32(BIT_19); + icb->firmware_options_2 |= cpu_to_le32(BIT_19); /* Use alternate PCI devfn */ if (LSB(rid)) - icb->firmware_options_2 |= - __constant_cpu_to_le32(BIT_18); + icb->firmware_options_2 |= cpu_to_le32(BIT_18); /* Use Disable MSIX Handshake mode for capable adapters */ if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && (ha->flags.msix_enabled)) { - icb->firmware_options_2 &= - __constant_cpu_to_le32(~BIT_22); + icb->firmware_options_2 &= cpu_to_le32(~BIT_22); ha->flags.disable_msix_handshake = 1; ql_dbg(ql_dbg_init, vha, 0x00fe, "MSIX Handshake Disable Mode turned on.\n"); } else { - icb->firmware_options_2 |= - __constant_cpu_to_le32(BIT_22); + icb->firmware_options_2 |= cpu_to_le32(BIT_22); } - icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); + icb->firmware_options_2 |= cpu_to_le32(BIT_23); WRT_REG_DWORD(®->isp25mq.req_q_in, 0); WRT_REG_DWORD(®->isp25mq.req_q_out, 0); @@ -2189,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) /* Clear outstanding commands array. 
*/ for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; - if (!req) + if (!req || !test_bit(que, ha->req_qid_map)) continue; req->out_ptr = (void *)(req->ring + req->length); *req->out_ptr = 0; @@ -2206,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) for (que = 0; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; - if (!rsp) + if (!rsp || !test_bit(que, ha->rsp_qid_map)) continue; rsp->in_ptr = (void *)(rsp->ring + rsp->length); *rsp->in_ptr = 0; @@ -2243,7 +2246,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) } if (IS_FWI2_CAPABLE(ha)) { - mid_init_cb->options = __constant_cpu_to_le16(BIT_1); + mid_init_cb->options = cpu_to_le16(BIT_1); mid_init_cb->init_cb.execution_throttle = cpu_to_le16(ha->fw_xcb_count); /* D-Port Status */ @@ -2672,8 +2675,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) nv->frame_payload_size = 1024; } - nv->max_iocb_allocation = __constant_cpu_to_le16(256); - nv->execution_throttle = __constant_cpu_to_le16(16); + nv->max_iocb_allocation = cpu_to_le16(256); + nv->execution_throttle = cpu_to_le16(16); nv->retry_count = 8; nv->retry_delay = 1; @@ -2691,7 +2694,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) nv->host_p[1] = BIT_2; nv->reset_delay = 5; nv->port_down_retry_count = 8; - nv->max_luns_per_target = __constant_cpu_to_le16(8); + nv->max_luns_per_target = cpu_to_le16(8); nv->link_down_timeout = 60; rval = 1; @@ -2819,7 +2822,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) memcpy(vha->node_name, icb->node_name, WWN_SIZE); memcpy(vha->port_name, icb->port_name, WWN_SIZE); - icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); + icb->execution_throttle = cpu_to_le16(0xFFFF); ha->retry_count = nv->retry_count; @@ -2871,10 +2874,10 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) if (ql2xloginretrycount) ha->login_retry_count = ql2xloginretrycount; - icb->lun_enables = __constant_cpu_to_le16(0); + icb->lun_enables = cpu_to_le16(0); icb->command_resource_count = 0; icb->immediate_notify_resource_count = 0; - icb->timeout = __constant_cpu_to_le16(0); + icb->timeout = cpu_to_le16(0); if (IS_QLA2100(ha) || IS_QLA2200(ha)) { /* Enable RIO */ @@ -2922,24 +2925,14 @@ qla2x00_rport_del(void *data) { fc_port_t *fcport = data; struct fc_rport *rport; - scsi_qla_host_t *vha = fcport->vha; unsigned long flags; - unsigned long vha_flags; spin_lock_irqsave(fcport->vha->host->host_lock, flags); rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) { + if (rport) fc_remote_port_delete(rport); - /* - * Release the target mode FC NEXUS in qla_target.c code - * if target mod is enabled. 
- */ - spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags); - qlt_fc_port_deleted(vha, fcport); - spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags); - } } /** @@ -3345,8 +3338,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_ONLINE); - qla2x00_reg_remote_port(vha, fcport); - return; + goto reg_port; } fcport->login_retry = 0; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); @@ -3354,7 +3346,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_set_fcport_state(fcport, FCS_ONLINE); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); - qla2x00_reg_remote_port(vha, fcport); + +reg_port: + if (qla_ini_mode_enabled(vha)) + qla2x00_reg_remote_port(vha, fcport); + else { + /* + * Create target mode FC NEXUS in qla_target.c + */ + qlt_fc_port_added(vha, fcport); + } } /* @@ -3379,6 +3380,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) LIST_HEAD(new_fcports); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + int discovery_gen; /* If FL port exists, then SNS is present */ if (IS_FWI2_CAPABLE(ha)) @@ -3449,6 +3451,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) fcport->scan_state = QLA_FCPORT_SCAN; } + /* Mark the time right before querying FW for connected ports. + * This process is long, asynchronous and by the time it's done, + * collected information might not be accurate anymore. E.g. + * disconnected port might have re-connected and a brand new + * session has been created. In this case session's generation + * will be newer than discovery_gen. */ + qlt_do_generation_tick(vha, &discovery_gen); + rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); if (rval != QLA_SUCCESS) break; @@ -3500,7 +3510,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) atomic_read(&fcport->state), fcport->flags, fcport->fc4_type, fcport->scan_state); - qlt_fc_port_deleted(vha, fcport); + qlt_fc_port_deleted(vha, fcport, + discovery_gen); } } } @@ -3945,12 +3956,10 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t *next_loopid) { int rval; - int retry; uint8_t opts; struct qla_hw_data *ha = vha->hw; rval = QLA_SUCCESS; - retry = 0; if (IS_ALOGIO_CAPABLE(ha)) { if (fcport->flags & FCF_ASYNC_SENT) @@ -4277,6 +4286,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) atomic_read(&fcport->state) != FCS_UNCONFIGURED) { spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_rport_del(fcport); + + /* + * Release the target mode FC NEXUS in + * qla_target.c, if target mod is enabled. + */ + qlt_fc_port_deleted(vha, fcport, + base_vha->total_fcport_update_gen); + spin_lock_irqsave(&ha->vport_slock, flags); } } @@ -4944,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) for (i = 1; i < ha->max_rsp_queues; i++) { rsp = ha->rsp_q_map[i]; - if (rsp) { + if (rsp && test_bit(i, ha->rsp_qid_map)) { rsp->options &= ~BIT_0; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) @@ -4959,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha) } for (i = 1; i < ha->max_req_queues; i++) { req = ha->req_q_map[i]; - if (req) { - /* Clear outstanding commands array. */ + if (req && test_bit(i, ha->req_qid_map)) { + /* Clear outstanding commands array. */ req->options &= ~BIT_0; ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) @@ -5096,7 +5113,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) /* Bad NVRAM data, set defaults parameters. 
*/ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' || - nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { + nv->nvram_version < cpu_to_le16(ICB_VERSION)) { /* Reset NVRAM data. */ ql_log(ql_log_warn, vha, 0x006b, "Inconsistent NVRAM detected: checksum=0x%x id=%c " @@ -5109,12 +5126,12 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) * Set default initialization control block. */ memset(nv, 0, ha->nvram_size); - nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); - nv->version = __constant_cpu_to_le16(ICB_VERSION); + nv->nvram_version = cpu_to_le16(ICB_VERSION); + nv->version = cpu_to_le16(ICB_VERSION); nv->frame_payload_size = 2048; - nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); - nv->exchange_count = __constant_cpu_to_le16(0); - nv->hard_address = __constant_cpu_to_le16(124); + nv->execution_throttle = cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0); + nv->hard_address = cpu_to_le16(124); nv->port_name[0] = 0x21; nv->port_name[1] = 0x00 + ha->port_no + 1; nv->port_name[2] = 0x00; @@ -5132,29 +5149,29 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv->node_name[6] = 0x55; nv->node_name[7] = 0x86; qla24xx_nvram_wwn_from_ofw(vha, nv); - nv->login_retry_count = __constant_cpu_to_le16(8); - nv->interrupt_delay_timer = __constant_cpu_to_le16(0); - nv->login_timeout = __constant_cpu_to_le16(0); + nv->login_retry_count = cpu_to_le16(8); + nv->interrupt_delay_timer = cpu_to_le16(0); + nv->login_timeout = cpu_to_le16(0); nv->firmware_options_1 = - __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); - nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); - nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); - nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); - nv->efi_parameters = __constant_cpu_to_le32(0); + cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); + nv->firmware_options_2 = cpu_to_le32(2 << 4); + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + nv->firmware_options_3 = cpu_to_le32(2 << 13); + nv->host_p = cpu_to_le32(BIT_11|BIT_10); + nv->efi_parameters = cpu_to_le32(0); nv->reset_delay = 5; - nv->max_luns_per_target = __constant_cpu_to_le16(128); - nv->port_down_retry_count = __constant_cpu_to_le16(30); - nv->link_down_timeout = __constant_cpu_to_le16(30); + nv->max_luns_per_target = cpu_to_le16(128); + nv->port_down_retry_count = cpu_to_le16(30); + nv->link_down_timeout = cpu_to_le16(30); rval = 1; } if (!qla_ini_mode_enabled(vha)) { /* Don't enable full login after initial LIP */ - nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); + nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Don't enable LIP full login for initiator */ - nv->host_p &= __constant_cpu_to_le32(~BIT_10); + nv->host_p &= cpu_to_le32(~BIT_10); } qlt_24xx_config_nvram_stage1(vha, nv); @@ -5188,14 +5205,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) qlt_24xx_config_nvram_stage2(vha, icb); - if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { + if (nv->host_p & cpu_to_le32(BIT_15)) { /* Use alternate WWN? */ memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); } /* Prepare nodename */ - if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { + if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { /* * Firmware will apply the following mask if the nodename was * not provided. 
@@ -5227,7 +5244,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) memcpy(vha->node_name, icb->node_name, WWN_SIZE); memcpy(vha->port_name, icb->port_name, WWN_SIZE); - icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); + icb->execution_throttle = cpu_to_le16(0xFFFF); ha->retry_count = le16_to_cpu(nv->login_retry_count); @@ -5235,7 +5252,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) nv->login_timeout = cpu_to_le16(ql2xlogintimeout); if (le16_to_cpu(nv->login_timeout) < 4) - nv->login_timeout = __constant_cpu_to_le16(4); + nv->login_timeout = cpu_to_le16(4); ha->login_timeout = le16_to_cpu(nv->login_timeout); icb->login_timeout = nv->login_timeout; @@ -5286,7 +5303,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? le16_to_cpu(icb->interrupt_delay_timer): 2; } - icb->firmware_options_2 &= __constant_cpu_to_le32( + icb->firmware_options_2 &= cpu_to_le32( ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); vha->flags.process_response_queue = 0; if (ha->zio_mode != QLA_ZIO_DISABLED) { @@ -6042,7 +6059,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* Bad NVRAM data, set defaults parameters. */ if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' || - nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { + nv->nvram_version < cpu_to_le16(ICB_VERSION)) { /* Reset NVRAM data. */ ql_log(ql_log_info, vha, 0x0073, "Inconsistent NVRAM detected: checksum=0x%x id=%c " @@ -6056,11 +6073,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) * Set default initialization control block. */ memset(nv, 0, ha->nvram_size); - nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); - nv->version = __constant_cpu_to_le16(ICB_VERSION); + nv->nvram_version = cpu_to_le16(ICB_VERSION); + nv->version = cpu_to_le16(ICB_VERSION); nv->frame_payload_size = 2048; - nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); - nv->exchange_count = __constant_cpu_to_le16(0); + nv->execution_throttle = cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0); nv->port_name[0] = 0x21; nv->port_name[1] = 0x00 + ha->port_no + 1; nv->port_name[2] = 0x00; @@ -6077,20 +6094,20 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->node_name[5] = 0x1c; nv->node_name[6] = 0x55; nv->node_name[7] = 0x86; - nv->login_retry_count = __constant_cpu_to_le16(8); - nv->interrupt_delay_timer = __constant_cpu_to_le16(0); - nv->login_timeout = __constant_cpu_to_le16(0); + nv->login_retry_count = cpu_to_le16(8); + nv->interrupt_delay_timer = cpu_to_le16(0); + nv->login_timeout = cpu_to_le16(0); nv->firmware_options_1 = - __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); - nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); - nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); - nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); - nv->efi_parameters = __constant_cpu_to_le32(0); + cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); + nv->firmware_options_2 = cpu_to_le32(2 << 4); + nv->firmware_options_2 |= cpu_to_le32(BIT_12); + nv->firmware_options_3 = cpu_to_le32(2 << 13); + nv->host_p = cpu_to_le32(BIT_11|BIT_10); + nv->efi_parameters = cpu_to_le32(0); nv->reset_delay = 5; - nv->max_luns_per_target = __constant_cpu_to_le16(128); - nv->port_down_retry_count = __constant_cpu_to_le16(30); - nv->link_down_timeout = __constant_cpu_to_le16(180); + nv->max_luns_per_target = cpu_to_le16(128); + nv->port_down_retry_count = cpu_to_le16(30); + nv->link_down_timeout 
= cpu_to_le16(180); nv->enode_mac[0] = 0x00; nv->enode_mac[1] = 0xC0; nv->enode_mac[2] = 0xDD; @@ -6149,13 +6166,13 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) qlt_81xx_config_nvram_stage2(vha, icb); /* Use alternate WWN? */ - if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { + if (nv->host_p & cpu_to_le32(BIT_15)) { memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); } /* Prepare nodename */ - if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { + if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { /* * Firmware will apply the following mask if the nodename was * not provided. @@ -6184,7 +6201,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) memcpy(vha->node_name, icb->node_name, WWN_SIZE); memcpy(vha->port_name, icb->port_name, WWN_SIZE); - icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); + icb->execution_throttle = cpu_to_le16(0xFFFF); ha->retry_count = le16_to_cpu(nv->login_retry_count); @@ -6192,7 +6209,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) nv->login_timeout = cpu_to_le16(ql2xlogintimeout); if (le16_to_cpu(nv->login_timeout) < 4) - nv->login_timeout = __constant_cpu_to_le16(4); + nv->login_timeout = cpu_to_le16(4); ha->login_timeout = le16_to_cpu(nv->login_timeout); icb->login_timeout = nv->login_timeout; @@ -6238,7 +6255,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* if not running MSI-X we need handshaking on interrupts */ if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) - icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); + icb->firmware_options_2 |= cpu_to_le32(BIT_22); /* Enable ZIO. */ if (!vha->flags.init_done) { @@ -6247,7 +6264,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? le16_to_cpu(icb->interrupt_delay_timer): 2; } - icb->firmware_options_2 &= __constant_cpu_to_le32( + icb->firmware_options_2 &= cpu_to_le32( ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); vha->flags.process_response_queue = 0; if (ha->zio_mode != QLA_ZIO_DISABLED) { diff --git a/kernel/drivers/scsi/qla2xxx/qla_iocb.c b/kernel/drivers/scsi/qla2xxx/qla_iocb.c index a1ab25fca..c49df34e9 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_iocb.c +++ b/kernel/drivers/scsi/qla2xxx/qla_iocb.c @@ -108,8 +108,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) cont_pkt = (cont_entry_t *)req->ring_ptr; /* Load packet defaults. */ - *((uint32_t *)(&cont_pkt->entry_type)) = - __constant_cpu_to_le32(CONTINUE_TYPE); + *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE); return (cont_pkt); } @@ -138,8 +137,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) /* Load packet defaults. */ *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ? 
- __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) : - __constant_cpu_to_le32(CONTINUE_A64_TYPE); + cpu_to_le32(CONTINUE_A64_TYPE_FX00) : + cpu_to_le32(CONTINUE_A64_TYPE); return (cont_pkt); } @@ -204,11 +203,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, /* Update entry type to indicate Command Type 2 IOCB */ *((uint32_t *)(&cmd_pkt->entry_type)) = - __constant_cpu_to_le32(COMMAND_TYPE); + cpu_to_le32(COMMAND_TYPE); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { - cmd_pkt->byte_count = __constant_cpu_to_le32(0); + cmd_pkt->byte_count = cpu_to_le32(0); return; } @@ -261,12 +260,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type 3 IOCB */ - *((uint32_t *)(&cmd_pkt->entry_type)) = - __constant_cpu_to_le32(COMMAND_A64_TYPE); + *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { - cmd_pkt->byte_count = __constant_cpu_to_le32(0); + cmd_pkt->byte_count = cpu_to_le32(0); return; } @@ -310,7 +308,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, int qla2x00_start_scsi(srb_t *sp) { - int ret, nseg; + int nseg; unsigned long flags; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; @@ -327,7 +325,6 @@ qla2x00_start_scsi(srb_t *sp) struct rsp_que *rsp; /* Setup device pointers. */ - ret = 0; vha = sp->fcport->vha; ha = vha->hw; reg = &ha->iobase->isp; @@ -403,7 +400,7 @@ qla2x00_start_scsi(srb_t *sp) /* Set target ID and LUN number*/ SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); cmd_pkt->lun = cpu_to_le16(cmd->device->lun); - cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); + cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG); /* Load SCSI command packet. */ memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); @@ -454,7 +451,7 @@ void qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) { struct qla_hw_data *ha = vha->hw; - device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); + device_reg_t *reg = ISP_QUE_REG(ha, req->id); if (IS_P3P_TYPE(ha)) { qla82xx_start_iocbs(vha); @@ -597,12 +594,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type 3 IOCB */ - *((uint32_t *)(&cmd_pkt->entry_type)) = - __constant_cpu_to_le32(COMMAND_TYPE_6); + *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { - cmd_pkt->byte_count = __constant_cpu_to_le32(0); + cmd_pkt->byte_count = cpu_to_le32(0); return 0; } @@ -611,13 +607,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { - cmd_pkt->control_flags = - __constant_cpu_to_le16(CF_WRITE_DATA); + cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); vha->qla_stats.output_bytes += scsi_bufflen(cmd); vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { - cmd_pkt->control_flags = - __constant_cpu_to_le16(CF_READ_DATA); + cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); vha->qla_stats.input_bytes += scsi_bufflen(cmd); vha->qla_stats.input_requests++; } @@ -680,7 +674,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, * * Returns the number of dsd list needed to store @dsds. 
*/ -inline uint16_t +static inline uint16_t qla24xx_calc_dsd_lists(uint16_t dsds) { uint16_t dsd_lists = 0; @@ -700,7 +694,7 @@ qla24xx_calc_dsd_lists(uint16_t dsds) * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer */ -inline void +static inline void qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, uint16_t tot_dsds) { @@ -710,32 +704,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *sg; int i; - struct req_que *req; cmd = GET_CMD_SP(sp); /* Update entry type to indicate Command Type 3 IOCB */ - *((uint32_t *)(&cmd_pkt->entry_type)) = - __constant_cpu_to_le32(COMMAND_TYPE_7); + *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7); /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { - cmd_pkt->byte_count = __constant_cpu_to_le32(0); + cmd_pkt->byte_count = cpu_to_le32(0); return; } vha = sp->fcport->vha; - req = vha->req; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { - cmd_pkt->task_mgmt_flags = - __constant_cpu_to_le16(TMF_WRITE_DATA); + cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA); vha->qla_stats.output_bytes += scsi_bufflen(cmd); vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { - cmd_pkt->task_mgmt_flags = - __constant_cpu_to_le16(TMF_READ_DATA); + cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA); vha->qla_stats.input_bytes += scsi_bufflen(cmd); vha->qla_stats.input_requests++; } @@ -809,7 +798,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, * match LBA in CDB + N */ case SCSI_PROT_DIF_TYPE2: - pkt->app_tag = __constant_cpu_to_le16(0); + pkt->app_tag = cpu_to_le16(0); pkt->app_tag_mask[0] = 0x0; pkt->app_tag_mask[1] = 0x0; @@ -840,7 +829,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, case SCSI_PROT_DIF_TYPE1: pkt->ref_tag = cpu_to_le32((uint32_t) (0xffffffff & scsi_get_lba(cmd))); - pkt->app_tag = __constant_cpu_to_le16(0); + pkt->app_tag = cpu_to_le16(0); pkt->app_tag_mask[0] = 0x0; pkt->app_tag_mask[1] = 0x0; @@ -933,11 +922,9 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, dma_addr_t sle_dma; uint32_t sle_dma_len, tot_prot_dma_len = 0; struct scsi_cmnd *cmd; - struct scsi_qla_host *vha; memset(&sgx, 0, sizeof(struct qla2_sgx)); if (sp) { - vha = sp->fcport->vha; cmd = GET_CMD_SP(sp); prot_int = cmd->device->sector_size; @@ -947,7 +934,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, sg_prot = scsi_prot_sglist(cmd); } else if (tc) { - vha = tc->vha; prot_int = tc->blk_sz; sgx.tot_bytes = tc->bufflen; sgx.cur_sg = tc->sg; @@ -1047,15 +1033,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, int i; uint16_t used_dsds = tot_dsds; struct scsi_cmnd *cmd; - struct scsi_qla_host *vha; if (sp) { cmd = GET_CMD_SP(sp); sgl = scsi_sglist(cmd); - vha = sp->fcport->vha; } else if (tc) { sgl = tc->sg; - vha = tc->vha; } else { BUG(); return 1; @@ -1231,7 +1214,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, uint32_t *cur_dsd, *fcp_dl; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; - int sgc; uint32_t total_bytes = 0; uint32_t data_bytes; uint32_t dif_bytes; @@ -1247,10 +1229,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd = GET_CMD_SP(sp); - sgc = 0; /* Update entry type to indicate Command Type CRC_2 IOCB */ - *((uint32_t *)(&cmd_pkt->entry_type)) = - 
__constant_cpu_to_le32(COMMAND_TYPE_CRC_2); + *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2); vha = sp->fcport->vha; ha = vha->hw; @@ -1258,7 +1238,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, /* No data transfer */ data_bytes = scsi_bufflen(cmd); if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { - cmd_pkt->byte_count = __constant_cpu_to_le32(0); + cmd_pkt->byte_count = cpu_to_le32(0); return QLA_SUCCESS; } @@ -1267,10 +1247,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { cmd_pkt->control_flags = - __constant_cpu_to_le16(CF_WRITE_DATA); + cpu_to_le16(CF_WRITE_DATA); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->control_flags = - __constant_cpu_to_le16(CF_READ_DATA); + cpu_to_le16(CF_READ_DATA); } if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || @@ -1392,7 +1372,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); - crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); + crc_ctx_pkt->guard_seed = cpu_to_le16(0); /* Fibre channel byte count */ cmd_pkt->byte_count = cpu_to_le32(total_bytes); fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + @@ -1400,13 +1380,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, *fcp_dl = htonl(total_bytes); if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { - cmd_pkt->byte_count = __constant_cpu_to_le32(0); + cmd_pkt->byte_count = cpu_to_le32(0); return QLA_SUCCESS; } /* Walks data segments */ - cmd_pkt->control_flags |= - __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); + cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); if (!bundling && tot_prot_dsds) { if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, @@ -1418,8 +1397,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, if (bundling && tot_prot_dsds) { /* Walks dif segments */ - cmd_pkt->control_flags |= - __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); + cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, tot_prot_dsds, NULL)) @@ -1442,7 +1420,7 @@ crc_queuing_error: int qla24xx_start_scsi(srb_t *sp) { - int ret, nseg; + int nseg; unsigned long flags; uint32_t *clr_ptr; uint32_t index; @@ -1458,8 +1436,6 @@ qla24xx_start_scsi(srb_t *sp) struct qla_hw_data *ha = vha->hw; /* Setup device pointers. */ - ret = 0; - qla25xx_set_que(sp, &rsp); req = vha->req; @@ -1753,7 +1729,7 @@ qla24xx_dif_start_scsi(srb_t *sp) cmd_pkt->entry_count = (uint8_t)req_cnt; /* Specify response queue number where completion should happen */ cmd_pkt->entry_status = (uint8_t) rsp->id; - cmd_pkt->timeout = __constant_cpu_to_le16(0); + cmd_pkt->timeout = cpu_to_le16(0); wmb(); /* Adjust ring index. 
*/ @@ -1819,7 +1795,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) { struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); + device_reg_t *reg = ISP_QUE_REG(ha, req->id); uint32_t index, handle; request_t *pkt; uint16_t cnt, req_cnt; @@ -1943,6 +1919,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); + if (!sp->fcport->tgt_session || + !sp->fcport->tgt_session->keep_nport_handle) + logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; @@ -2041,10 +2020,10 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->entry_status = 0; els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); - els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); + els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); els_iocb->vp_index = sp->fcport->vha->vp_idx; els_iocb->sof_type = EST_SOFI3; - els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); + els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); els_iocb->opcode = sp->type == SRB_ELS_CMD_RPT ? @@ -2088,7 +2067,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) struct qla_hw_data *ha = vha->hw; struct fc_bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; - int cont_iocb_prsnt = 0; int entry_count = 1; memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); @@ -2096,13 +2074,13 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) ct_iocb->entry_status = 0; ct_iocb->handle1 = sp->handle; SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); - ct_iocb->status = __constant_cpu_to_le16(0); - ct_iocb->control_flags = __constant_cpu_to_le16(0); + ct_iocb->status = cpu_to_le16(0); + ct_iocb->control_flags = cpu_to_le16(0); ct_iocb->timeout = 0; ct_iocb->cmd_dsd_count = - __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); + cpu_to_le16(bsg_job->request_payload.sg_cnt); ct_iocb->total_dsd_count = - __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); + cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); ct_iocb->req_bytecount = cpu_to_le32(bsg_job->request_payload.payload_len); ct_iocb->rsp_bytecount = @@ -2139,7 +2117,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) vha->hw->req_q_map[0]); cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; avail_dsds = 5; - cont_iocb_prsnt = 1; entry_count++; } @@ -2167,7 +2144,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) struct qla_hw_data *ha = vha->hw; struct fc_bsg_job *bsg_job = sp->u.bsg_job; int loop_iterartion = 0; - int cont_iocb_prsnt = 0; int entry_count = 1; ct_iocb->entry_type = CT_IOCB_TYPE; @@ -2177,13 +2153,13 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); ct_iocb->vp_index = sp->fcport->vha->vp_idx; - ct_iocb->comp_status = __constant_cpu_to_le16(0); + ct_iocb->comp_status = cpu_to_le16(0); ct_iocb->cmd_dsd_count = - __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); + cpu_to_le16(bsg_job->request_payload.sg_cnt); ct_iocb->timeout = 0; ct_iocb->rsp_dsd_count = - __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); + cpu_to_le16(bsg_job->reply_payload.sg_cnt); ct_iocb->rsp_byte_count = 
cpu_to_le32(bsg_job->reply_payload.payload_len); ct_iocb->cmd_byte_count = @@ -2214,7 +2190,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) ha->req_q_map[0]); cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; avail_dsds = 5; - cont_iocb_prsnt = 1; entry_count++; } @@ -2237,7 +2212,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) int qla82xx_start_scsi(srb_t *sp) { - int ret, nseg; + int nseg; unsigned long flags; struct scsi_cmnd *cmd; uint32_t *clr_ptr; @@ -2257,7 +2232,6 @@ qla82xx_start_scsi(srb_t *sp) struct rsp_que *rsp = NULL; /* Setup device pointers. */ - ret = 0; reg = &ha->iobase->isp82; cmd = GET_CMD_SP(sp); req = vha->req; @@ -2536,16 +2510,12 @@ sufficient_dsds: /* write, read and verify logic */ dbval = dbval | (req->id << 8) | (req->ring_index << 16); if (ql2xdbwr) - qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); + qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); else { - WRT_REG_DWORD( - (unsigned long __iomem *)ha->nxdb_wr_ptr, - dbval); + WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); - while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) { - WRT_REG_DWORD( - (unsigned long __iomem *)ha->nxdb_wr_ptr, - dbval); + while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { + WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); } } @@ -2679,7 +2649,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, /*Update entry type to indicate bidir command */ *((uint32_t *)(&cmd_pkt->entry_type)) = - __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL); + cpu_to_le32(COMMAND_BIDIRECTIONAL); /* Set the transfer direction, in this set both flags * Also set the BD_WRAP_BACK flag, firmware will take care @@ -2687,8 +2657,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, */ cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); - cmd_pkt->control_flags = - __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | + cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | BD_WRAP_BACK); req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; @@ -2797,10 +2766,10 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) handle = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; } if (index == req->num_outstanding_cmds) { diff --git a/kernel/drivers/scsi/qla2xxx/qla_isr.c b/kernel/drivers/scsi/qla2xxx/qla_isr.c index 6dc14cd78..0e59731f9 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_isr.c +++ b/kernel/drivers/scsi/qla2xxx/qla_isr.c @@ -116,7 +116,7 @@ bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) { /* Check for PCI disconnection */ - if (reg == 0xffffffff) { + if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { @@ -560,6 +560,17 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) return ret; } +static inline fc_port_t * +qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) +{ + fc_port_t *fcport; + + list_for_each_entry(fcport, &vha->vp_fcports, list) + if (fcport->loop_id == loop_id) + return fcport; + return NULL; +} + /** * 
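/*
 * Aside (editorial sketch, not part of the patch above): the
 * qla82xx_start_scsi() doorbell hunk keeps the existing "write the doorbell,
 * barrier, re-read until the value sticks" loop and only drops casts that
 * became unnecessary once nxdb_wr_ptr/nxdb_rd_ptr carry proper __iomem
 * pointer types. A purely structural userspace sketch of that retry pattern
 * (an ordinary volatile variable stands in for the MMIO doorbell, so the loop
 * exits on the first pass; all names are illustrative assumptions):
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t db_wr; /* stands in for ha->nxdb_wr_ptr */
static volatile uint32_t db_rd; /* stands in for ha->nxdb_rd_ptr */

static void post_doorbell(uint32_t dbval)
{
    db_wr = dbval;
    db_rd = dbval; /* real hardware latches the write; simulated here */
    atomic_thread_fence(memory_order_seq_cst); /* plays the role of wmb() */
    while (db_rd != dbval) { /* repost until the device has seen the value */
        db_wr = dbval;
        atomic_thread_fence(memory_order_seq_cst);
    }
}

int main(void)
{
    uint32_t dbval = 0x04 | (3u << 5) | (1u << 8); /* like 0x04 | portnum<<5 | req->id<<8 */
    post_doorbell(dbval);
    printf("doorbell posted: 0x%08x\n", db_wr);
    return 0;
}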
qla2x00_async_event() - Process aynchronous events. * @ha: SCSI driver HA context @@ -575,7 +586,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; - uint32_t rscn_entry, host_pid, tmp_pid; + uint32_t rscn_entry, host_pid; unsigned long flags; fc_port_t *fcport = NULL; @@ -897,11 +908,29 @@ skip_rio: (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) break; - /* Global event -- port logout or port unavailable. */ - if (mb[1] == 0xffff && mb[2] == 0x7) { + if (mb[2] == 0x7) { ql_dbg(ql_dbg_async, vha, 0x5010, - "Port unavailable %04x %04x %04x.\n", + "Port %s %04x %04x %04x.\n", + mb[1] == 0xffff ? "unavailable" : "logout", mb[1], mb[2], mb[3]); + + if (mb[1] == 0xffff) + goto global_port_update; + + /* Port logout */ + fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); + if (!fcport) + break; + if (atomic_read(&fcport->state) != FCS_ONLINE) + break; + ql_dbg(ql_dbg_async, vha, 0x508a, + "Marking port lost loopid=%04x portid=%06x.\n", + fcport->loop_id, fcport->d_id.b24); + qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + break; + +global_port_update: + /* Port unavailable. */ ql_log(ql_log_warn, vha, 0x505e, "Link is offline.\n"); @@ -998,7 +1027,6 @@ skip_rio: list_for_each_entry(fcport, &vha->vp_fcports, list) { if (atomic_read(&fcport->state) != FCS_ONLINE) continue; - tmp_pid = fcport->d_id.b24; if (fcport->d_id.b24 == rscn_entry) { qla2x00_mark_device_lost(vha, fcport, 0, 0); break; @@ -1565,7 +1593,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) "Async-%s error - hdl=%x entry-status(%x).\n", type, sp->handle, sts->entry_status); iocb->u.tmf.data = QLA_FUNCTION_FAILED; - } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { + } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_log(ql_log_warn, fcport->vha, 0x5039, "Async-%s error - hdl=%x completion status(%x).\n", type, sp->handle, sts->comp_status); @@ -1580,7 +1608,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) ql_log(ql_log_warn, fcport->vha, 0x503c, "Async-%s error - hdl=%x response(%x).\n", type, sp->handle, sts->data[3]); - iocb->u.tmf.data = QLA_FUNCTION_FAILED; + iocb->u.tmf.data = QLA_FUNCTION_FAILED; } } @@ -1979,7 +2007,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, rval = EXT_STATUS_ERR; break; } - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_job->reply->reply_payload_rcv_len = 0; done: /* Return the vendor specific reply to API */ @@ -2045,14 +2073,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } /* Validate handle. 
*/ - if (handle < req->num_outstanding_cmds) + if (handle < req->num_outstanding_cmds) { sp = req->outstanding_cmds[handle]; - else - sp = NULL; - - if (sp == NULL) { + if (!sp) { + ql_dbg(ql_dbg_io, vha, 0x3075, + "%s(%ld): Already returned command for status handle (0x%x).\n", + __func__, vha->host_no, sts->handle); + return; + } + } else { ql_dbg(ql_dbg_io, vha, 0x3017, - "Invalid status handle (0x%x).\n", sts->handle); + "Invalid status handle, out of range (0x%x).\n", + sts->handle); if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { if (IS_P3P_TYPE(ha)) @@ -2339,12 +2371,12 @@ out: ql_dbg(ql_dbg_io, fcport->vha, 0x3022, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " - "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", + "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, - resid_len, fw_resid_len); + resid_len, fw_resid_len, sp, cp); if (rsp->status_srb == NULL) sp->done(ha, sp, res); @@ -2441,13 +2473,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) } fatal: ql_log(ql_log_warn, vha, 0x5030, - "Error entry - invalid handle/queue.\n"); - - if (IS_P3P_TYPE(ha)) - set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); - else - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); + "Error entry - invalid handle/queue (%04x).\n", que); } /** @@ -2992,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) "MSI-X: Failed to enable support " "-- %d/%d\n Retry with %d vectors.\n", ha->msix_count, ret, ret); + ha->msix_count = ret; + ha->max_rsp_queues = ha->msix_count - 1; } - ha->msix_count = ret; - ha->max_rsp_queues = ha->msix_count - 1; ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); if (!ha->msix_entries) { diff --git a/kernel/drivers/scsi/qla2xxx/qla_mbx.c b/kernel/drivers/scsi/qla2xxx/qla_mbx.c index 02b1c1c53..cb11e04be 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_mbx.c +++ b/kernel/drivers/scsi/qla2xxx/qla_mbx.c @@ -555,7 +555,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) if (IS_FWI2_CAPABLE(ha)) mcp->in_mb |= MBX_17|MBX_16|MBX_15; if (IS_QLA27XX(ha)) - mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18; + mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 | + MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8; + mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); @@ -571,6 +573,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. 
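/*
 * Aside (editorial sketch, not part of the patch above): the
 * qla2x00_status_entry() hunk restructures handle validation so the two
 * failure modes are reported separately: an in-range handle whose slot is
 * already NULL ("already returned"), versus a handle that is simply out of
 * range. A minimal userspace sketch of that lookup shape (table size and
 * names are illustrative assumptions):
 */
#include <stddef.h>
#include <stdio.h>

#define NUM_OUTSTANDING 8

static const char *slots[NUM_OUTSTANDING]; /* stands in for outstanding_cmds[] */

static const char *claim_handle(unsigned int handle)
{
    if (handle >= NUM_OUTSTANDING) {
        fprintf(stderr, "invalid handle, out of range (0x%x)\n", handle);
        return NULL;
    }
    if (!slots[handle]) {
        fprintf(stderr, "already returned command for handle (0x%x)\n", handle);
        return NULL;
    }
    const char *sp = slots[handle];
    slots[handle] = NULL; /* the slot is reclaimed exactly once */
    return sp;
}

int main(void)
{
    slots[2] = "cmd-2";
    printf("first claim:  %s\n", claim_handle(2) ? "ok" : "failed");
    printf("second claim: %s\n", claim_handle(2) ? "ok" : "failed");
    printf("range check:  %s\n", claim_handle(99) ? "ok" : "failed");
    return 0;
}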
*/ else ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; + if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { ha->mpi_version[0] = mcp->mb[10] & 0xff; ha->mpi_version[1] = mcp->mb[11] >> 8; @@ -580,6 +583,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->phy_version[1] = mcp->mb[9] >> 8; ha->phy_version[2] = mcp->mb[9] & 0xff; } + if (IS_FWI2_CAPABLE(ha)) { ha->fw_attributes_h = mcp->mb[15]; ha->fw_attributes_ext[0] = mcp->mb[16]; @@ -591,7 +595,14 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", __func__, mcp->mb[17], mcp->mb[16]); } + if (IS_QLA27XX(ha)) { + ha->mpi_version[0] = mcp->mb[10] & 0xff; + ha->mpi_version[1] = mcp->mb[11] >> 8; + ha->mpi_version[2] = mcp->mb[11] & 0xff; + ha->pep_version[0] = mcp->mb[13] & 0xff; + ha->pep_version[1] = mcp->mb[14] >> 8; + ha->pep_version[2] = mcp->mb[14] & 0xff; ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; } @@ -1135,20 +1146,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; } /* If FA-WWN supported */ - if (mcp->mb[7] & BIT_14) { - vha->port_name[0] = MSB(mcp->mb[16]); - vha->port_name[1] = LSB(mcp->mb[16]); - vha->port_name[2] = MSB(mcp->mb[17]); - vha->port_name[3] = LSB(mcp->mb[17]); - vha->port_name[4] = MSB(mcp->mb[18]); - vha->port_name[5] = LSB(mcp->mb[18]); - vha->port_name[6] = MSB(mcp->mb[19]); - vha->port_name[7] = LSB(mcp->mb[19]); - fc_host_port_name(vha->host) = - wwn_to_u64(vha->port_name); - ql_dbg(ql_dbg_mbx, vha, 0x10ca, - "FA-WWN acquired %016llx\n", - wwn_to_u64(vha->port_name)); + if (IS_FAWWN_CAPABLE(vha->hw)) { + if (mcp->mb[7] & BIT_14) { + vha->port_name[0] = MSB(mcp->mb[16]); + vha->port_name[1] = LSB(mcp->mb[16]); + vha->port_name[2] = MSB(mcp->mb[17]); + vha->port_name[3] = LSB(mcp->mb[17]); + vha->port_name[4] = MSB(mcp->mb[18]); + vha->port_name[5] = LSB(mcp->mb[18]); + vha->port_name[6] = MSB(mcp->mb[19]); + vha->port_name[7] = LSB(mcp->mb[19]); + fc_host_port_name(vha->host) = + wwn_to_u64(vha->port_name); + ql_dbg(ql_dbg_mbx, vha, 0x10ca, + "FA-WWN acquired %016llx\n", + wwn_to_u64(vha->port_name)); + } } } @@ -1239,7 +1252,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) "Entered %s.\n", __func__); if (IS_P3P_TYPE(ha) && ql2xdbwr) - qla82xx_wr_32(ha, ha->nxdb_wr_ptr, + qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); if (ha->flags.npiv_supported) @@ -1865,7 +1878,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, uint32_t iop[2]; struct qla_hw_data *ha = vha->hw; struct req_que *req; - struct rsp_que *rsp; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, "Entered %s.\n", __func__); @@ -1874,7 +1886,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, req = ha->req_q_map[0]; else req = vha->req; - rsp = req->rsp; lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { @@ -1888,11 +1899,11 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, lg->entry_count = 1; lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); - lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); + lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); if (opt & BIT_0) - lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI); + lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); if (opt & 
BIT_1) - lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI); + lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; @@ -1907,7 +1918,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, "Failed to complete IOCB -- error status (%x).\n", lg->entry_status); rval = QLA_FUNCTION_FAILED; - } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { + } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { iop[0] = le32_to_cpu(lg->io_parameter[0]); iop[1] = le32_to_cpu(lg->io_parameter[1]); @@ -1961,7 +1972,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, mb[10] |= BIT_0; /* Class 2. */ if (lg->io_parameter[9] || lg->io_parameter[10]) mb[10] |= BIT_1; /* Class 3. */ - if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7)) + if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) mb[10] |= BIT_7; /* Confirmed Completion * Allowed */ @@ -2142,7 +2153,6 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, dma_addr_t lg_dma; struct qla_hw_data *ha = vha->hw; struct req_que *req; - struct rsp_que *rsp; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, "Entered %s.\n", __func__); @@ -2159,13 +2169,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, req = ha->req_q_map[0]; else req = vha->req; - rsp = req->rsp; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = - __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| + cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| LCF_FREE_NPORT); lg->port_id[0] = al_pa; lg->port_id[1] = area; @@ -2181,7 +2190,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, "Failed to complete IOCB -- error status (%x).\n", lg->entry_status); rval = QLA_FUNCTION_FAILED; - } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { + } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x1071, "Failed to complete IOCB -- completion status (%x) " "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), @@ -2415,7 +2424,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, *orig_iocb_cnt = mcp->mb[10]; if (vha->hw->flags.npiv_supported && max_npiv_vports) *max_npiv_vports = mcp->mb[11]; - if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) + if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || + IS_QLA27XX(vha->hw)) && max_fcfs) *max_fcfs = mcp->mb[12]; } @@ -2672,7 +2682,7 @@ qla24xx_abort_command(srb_t *sp) "Failed to complete IOCB -- error status (%x).\n", abt->entry_status); rval = QLA_FUNCTION_FAILED; - } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { + } else if (abt->nport_handle != cpu_to_le16(0)) { ql_dbg(ql_dbg_mbx, vha, 0x1090, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(abt->nport_handle)); @@ -2755,8 +2765,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, "Failed to complete IOCB -- error status (%x).\n", sts->entry_status); rval = QLA_FUNCTION_FAILED; - } else if (sts->comp_status != - __constant_cpu_to_le16(CS_COMPLETE)) { + } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x1096, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(sts->comp_status)); @@ -2852,7 +2861,8 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 
mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw)) + if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && + !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, @@ -2890,7 +2900,8 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw)) + if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && + !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, @@ -3482,7 +3493,7 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha) "Failed to complete IOCB -- error status (%x).\n", vpmod->comp_status); rval = QLA_FUNCTION_FAILED; - } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { + } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10bf, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(vpmod->comp_status)); @@ -3541,7 +3552,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) vce->entry_type = VP_CTRL_IOCB_TYPE; vce->entry_count = 1; vce->command = cpu_to_le16(cmd); - vce->vp_count = __constant_cpu_to_le16(1); + vce->vp_count = cpu_to_le16(1); /* index map in firmware starts with 1; decrement index * this is ok as we never use index 0 @@ -3561,7 +3572,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) "Failed to complete IOCB -- error status (%x).\n", vce->entry_status); rval = QLA_FUNCTION_FAILED; - } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { + } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10c5, "Failed to complet IOCB -- completion status (%x).\n", le16_to_cpu(vce->comp_status)); @@ -3898,7 +3909,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { WRT_REG_DWORD(rsp->rsp_q_out, 0); - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) WRT_REG_DWORD(rsp->rsp_q_in, 0); } @@ -5345,7 +5356,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); diff --git a/kernel/drivers/scsi/qla2xxx/qla_mid.c b/kernel/drivers/scsi/qla2xxx/qla_mid.c index cc9419251..cf7ba52ba 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_mid.c +++ b/kernel/drivers/scsi/qla2xxx/qla_mid.c @@ -371,7 +371,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) { - int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; unsigned long flags = 0; @@ -392,7 +391,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); - ret = qla2x00_do_dpc_vp(vp); + qla2x00_do_dpc_vp(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); @@ -601,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) /* Delete request queues */ for (cnt = 1; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; - if (req) { + if (req && test_bit(cnt, ha->req_qid_map)) { ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00ea, @@ -615,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) /* Delete response queues */ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { rsp = ha->rsp_q_map[cnt]; - if 
(rsp) { + if (rsp && test_bit(cnt, ha->rsp_qid_map)) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00eb, diff --git a/kernel/drivers/scsi/qla2xxx/qla_mr.c b/kernel/drivers/scsi/qla2xxx/qla_mr.c index 6d190b4b8..b5029e543 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_mr.c +++ b/kernel/drivers/scsi/qla2xxx/qla_mr.c @@ -862,7 +862,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha) dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2); req->length = ha->req_que_len; - req->ring = (void *)ha->iobase + ha->req_que_off; + req->ring = (void __force *)ha->iobase + ha->req_que_off; req->dma = bar2_hdl + ha->req_que_off; if ((!req->ring) || (req->length == 0)) { ql_log_pci(ql_log_info, ha->pdev, 0x012f, @@ -877,7 +877,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha) ha->req_que_off, (u64)req->dma); rsp->length = ha->rsp_que_len; - rsp->ring = (void *)ha->iobase + ha->rsp_que_off; + rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off; rsp->dma = bar2_hdl + ha->rsp_que_off; if ((!rsp->ring) || (rsp->length == 0)) { ql_log_pci(ql_log_info, ha->pdev, 0x0131, @@ -1317,10 +1317,10 @@ int qlafx00_configure_devices(scsi_qla_host_t *vha) { int rval; - unsigned long flags, save_flags; + unsigned long flags; rval = QLA_SUCCESS; - save_flags = flags = vha->dpc_flags; + flags = vha->dpc_flags; ql_dbg(ql_dbg_disc, vha, 0x2090, "Configure devices -- dpc flags =0x%lx\n", flags); @@ -1425,7 +1425,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp) pkt = rsp->ring_ptr; for (cnt = 0; cnt < rsp->length; cnt++) { pkt->signature = RESPONSE_PROCESSED; - WRT_REG_DWORD((void __iomem *)&pkt->signature, + WRT_REG_DWORD((void __force __iomem *)&pkt->signature, RESPONSE_PROCESSED); pkt++; } @@ -2279,7 +2279,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) struct sts_entry_fx00 *sts; __le16 comp_status; __le16 scsi_status; - uint16_t ox_id; __le16 lscsi_status; int32_t resid; uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, @@ -2344,7 +2343,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) fcport = sp->fcport; - ox_id = 0; sense_len = par_sense_len = rsp_info_len = resid_len = fw_resid_len = 0; if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)) @@ -2528,12 +2526,12 @@ check_scsi_status: ql_dbg(ql_dbg_io, fcport->vha, 0x3058, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " - "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, " + "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, " "par_sense_len=0x%x, rsp_info_len=0x%x\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->tgt_id, lscsi_status, cp->cmnd, scsi_bufflen(cp), - rsp_info_len, resid_len, fw_resid_len, sense_len, + rsp_info, resid_len, fw_resid_len, sense_len, par_sense_len, rsp_info_len); if (rsp->status_srb == NULL) @@ -3009,7 +3007,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, /* No data transfer */ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { - lcmd_pkt->byte_count = __constant_cpu_to_le32(0); + lcmd_pkt->byte_count = cpu_to_le32(0); return; } @@ -3071,7 +3069,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, int qlafx00_start_scsi(srb_t *sp) { - int ret, nseg; + int nseg; unsigned long flags; uint32_t index; uint32_t handle; @@ -3088,8 +3086,6 @@ qlafx00_start_scsi(srb_t *sp) struct scsi_lun llun; /* Setup device pointers. 
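/*
 * Aside (editorial sketch, not part of the patch above): the
 * qla25xx_delete_queues() hunks only tear a request/response queue down when
 * its bit is set in req_qid_map/rsp_qid_map, so slots that were never created
 * (or were already freed) are skipped even if a stale pointer lingers in the
 * map array. A userspace sketch of the pattern, with a plain word standing in
 * for the kernel bitmap API (all names are illustrative assumptions):
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 16

static void *queues[MAX_QUEUES];
static uint32_t qid_map; /* bit n set => queues[n] is live */

static void delete_queues(void)
{
    for (unsigned int cnt = 1; cnt < MAX_QUEUES; cnt++) {
        if (!queues[cnt] || !((qid_map >> cnt) & 1u))
            continue; /* never created or already torn down */
        printf("deleting queue %u\n", cnt);
        queues[cnt] = NULL;
        qid_map &= ~(1u << cnt);
    }
}

int main(void)
{
    static int q3, q5;
    queues[3] = &q3;
    qid_map |= 1u << 3;
    queues[5] = &q5; /* stale pointer: bit intentionally not set */
    delete_queues(); /* only queue 3 is deleted */
    return 0;
}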
*/ - ret = 0; - rsp = ha->rsp_q_map[0]; req = vha->req; diff --git a/kernel/drivers/scsi/qla2xxx/qla_nx.c b/kernel/drivers/scsi/qla2xxx/qla_nx.c index 7d2b18f26..b6b4cfdd7 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_nx.c +++ b/kernel/drivers/scsi/qla2xxx/qla_nx.c @@ -347,32 +347,31 @@ char *qdev_state(uint32_t dev_state) } /* - * In: 'off' is offset from CRB space in 128M pci map - * Out: 'off' is 2M pci map addr + * In: 'off_in' is offset from CRB space in 128M pci map + * Out: 'off_out' is 2M pci map addr * side effect: lock crb window */ static void -qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) +qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in, + void __iomem **off_out) { u32 win_read; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - ha->crb_win = CRB_HI(*off); - writel(ha->crb_win, - (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + ha->crb_win = CRB_HI(off_in); + writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase); /* Read back value to make sure write has gone through before trying * to use it. */ - win_read = RD_REG_DWORD((void __iomem *) - (CRB_WINDOW_2M + ha->nx_pcibase)); + win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase); if (win_read != ha->crb_win) { ql_dbg(ql_dbg_p3p, vha, 0xb000, "%s: Written crbwin (0x%x) " "!= Read crbwin (0x%x), off=0x%lx.\n", - __func__, ha->crb_win, win_read, *off); + __func__, ha->crb_win, win_read, off_in); } - *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; + *off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; } static inline unsigned long @@ -417,32 +416,34 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) } static int -qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off) +qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, + void __iomem **off_out) { struct crb_128M_2M_sub_block_map *m; - if (*off >= QLA82XX_CRB_MAX) + if (off_in >= QLA82XX_CRB_MAX) return -1; - if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) { - *off = (*off - QLA82XX_PCI_CAMQM) + + if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) { + *off_out = (off_in - QLA82XX_PCI_CAMQM) + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; return 0; } - if (*off < QLA82XX_PCI_CRBSPACE) + if (off_in < QLA82XX_PCI_CRBSPACE) return -1; - *off -= QLA82XX_PCI_CRBSPACE; + off_in -= QLA82XX_PCI_CRBSPACE; /* Try direct map */ - m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; + m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; - if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { - *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase; + if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) { + *off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase; return 0; } /* Not in direct map, use crb window */ + *off_out = (void __iomem *)off_in; return 1; } @@ -465,51 +466,61 @@ static int qla82xx_crb_win_lock(struct qla_hw_data *ha) } int -qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data) +qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data) { + void __iomem *off; unsigned long flags = 0; int rv; - rv = qla82xx_pci_get_crb_addr_2M(ha, &off); + rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off); BUG_ON(rv == -1); if (rv == 1) { +#ifndef __CHECKER__ write_lock_irqsave(&ha->hw_lock, flags); +#endif qla82xx_crb_win_lock(ha); - qla82xx_pci_set_crbwindow_2M(ha, &off); + qla82xx_pci_set_crbwindow_2M(ha, off_in, &off); } writel(data, (void __iomem *)off); if (rv == 1) { 
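/*
 * Aside (editorial sketch, not part of the patch above): the qla_nx.c hunks
 * split the old in/out "ulong *off" of qla82xx_pci_get_crb_addr_2M() and
 * friends into an input offset (off_in) and an output cookie
 * (void __iomem **off_out), so one variable no longer doubles as both an
 * integer offset and a mapped pointer. A userspace sketch of the same
 * in/out-parameter split (window size and names are illustrative
 * assumptions):
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t window[64 * 1024]; /* stands in for the mapped CRB window */

/*
 * Returns 0 for a direct mapping, 1 if the caller must reposition the window
 * first, -1 if the offset is out of range; the mapped pointer comes back via
 * *out, the input offset is never overwritten.
 */
static int map_offset(uint32_t off_in, uint8_t **out)
{
    if (off_in >= 4u * 1024 * 1024)
        return -1;
    if (off_in < sizeof(window)) {
        *out = &window[off_in]; /* direct map */
        return 0;
    }
    *out = &window[off_in % sizeof(window)];
    return 1; /* indirect: window switch needed before use */
}

int main(void)
{
    uint8_t *p;
    printf("direct:   rv=%d\n", map_offset(0x100, &p));
    printf("indirect: rv=%d\n", map_offset(0x20000, &p));
    printf("invalid:  rv=%d\n", map_offset(0x7fffffff, &p));
    return 0;
}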
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); +#ifndef __CHECKER__ write_unlock_irqrestore(&ha->hw_lock, flags); +#endif } return 0; } int -qla82xx_rd_32(struct qla_hw_data *ha, ulong off) +qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in) { + void __iomem *off; unsigned long flags = 0; int rv; u32 data; - rv = qla82xx_pci_get_crb_addr_2M(ha, &off); + rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off); BUG_ON(rv == -1); if (rv == 1) { +#ifndef __CHECKER__ write_lock_irqsave(&ha->hw_lock, flags); +#endif qla82xx_crb_win_lock(ha); - qla82xx_pci_set_crbwindow_2M(ha, &off); + qla82xx_pci_set_crbwindow_2M(ha, off_in, &off); } - data = RD_REG_DWORD((void __iomem *)off); + data = RD_REG_DWORD(off); if (rv == 1) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); +#ifndef __CHECKER__ write_unlock_irqrestore(&ha->hw_lock, flags); +#endif } return data; } @@ -547,9 +558,6 @@ void qla82xx_idc_unlock(struct qla_hw_data *ha) qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); } -/* PCI Windowing for DDR regions. */ -#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ - (((addr) <= (high)) && ((addr) >= (low))) /* * check memory access boundary. * used by test agent. support ddr access only for now @@ -558,9 +566,9 @@ static unsigned long qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, unsigned long long addr, int size) { - if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, + if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || - !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET, + !addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || ((size != 1) && (size != 2) && (size != 4) && (size != 8))) return 0; @@ -577,7 +585,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) u32 win_read; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, + if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) { /* DDR network side */ window = MN_WIN(addr); @@ -592,7 +600,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; - } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, + } else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) { unsigned int temp1; if ((addr & 0x00ff800) == 0xff800) { @@ -615,7 +623,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; - } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, + } else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, QLA82XX_P3_ADDR_QDR_NET_MAX)) { /* QDR network side */ window = MS_WIN(addr); @@ -656,16 +664,16 @@ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha, qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; /* DDR network side */ - if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, + if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) BUG(); - else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, + else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) return 1; - else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1, + else if (addr_in_range(addr, QLA82XX_ADDR_OCM1, QLA82XX_ADDR_OCM1_MAX)) return 1; - else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) { + else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) { /* QDR network side */ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; if 
(ha->qdr_sn_window == window) @@ -922,20 +930,18 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) { uint32_t off_value, rval = 0; - WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase), - (off & 0xFFFF0000)); + WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000); /* Read back value to make sure write has gone through */ - RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); + RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase); off_value = (off & 0x0000FFFF); if (flag) - WRT_REG_DWORD((void __iomem *) - (off_value + CRB_INDIRECT_2M + ha->nx_pcibase), - data); + WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase, + data); else - rval = RD_REG_DWORD((void __iomem *) - (off_value + CRB_INDIRECT_2M + ha->nx_pcibase)); + rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M + + ha->nx_pcibase); return rval; } @@ -1663,8 +1669,7 @@ qla82xx_iospace_config(struct qla_hw_data *ha) } len = pci_resource_len(ha->pdev, 0); - ha->nx_pcibase = - (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); + ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len); if (!ha->nx_pcibase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, "Cannot remap pcibase MMIO, aborting.\n"); @@ -1673,17 +1678,13 @@ qla82xx_iospace_config(struct qla_hw_data *ha) /* Mapping of IO base pointer */ if (IS_QLA8044(ha)) { - ha->iobase = - (device_reg_t *)((uint8_t *)ha->nx_pcibase); + ha->iobase = ha->nx_pcibase; } else if (IS_QLA82XX(ha)) { - ha->iobase = - (device_reg_t *)((uint8_t *)ha->nx_pcibase + - 0xbc000 + (ha->pdev->devfn << 11)); + ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11); } if (!ql2xdbwr) { - ha->nxdb_wr_ptr = - (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + + ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) + (ha->pdev->devfn << 12)), 4); if (!ha->nxdb_wr_ptr) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, @@ -1694,10 +1695,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha) /* Mapping of IO base pointer, * door bell read and write pointer */ - ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) + + ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) + (ha->pdev->devfn * 8); } else { - ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ? + ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ? QLA82XX_CAMRAM_DB1 : QLA82XX_CAMRAM_DB2); } @@ -1707,12 +1708,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha) ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", - (void *)ha->nx_pcibase, ha->iobase, + ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", - (void *)ha->nx_pcibase, ha->iobase, + ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); return 0; @@ -1740,8 +1741,8 @@ qla82xx_pci_config(scsi_qla_host_t *vha) ret = pci_set_mwi(ha->pdev); ha->chip_revision = ha->pdev->revision; ql_dbg(ql_dbg_init, vha, 0x0043, - "Chip revision:%d.\n", - ha->chip_revision); + "Chip revision:%d; pci_set_mwi() returned %d.\n", + ha->chip_revision, ret); return 0; } @@ -1768,8 +1769,8 @@ void qla82xx_config_rings(struct scsi_qla_host *vha) /* Setup ring parameters in initialization control block. 
*/ icb = (struct init_cb_81xx *)ha->init_cb; - icb->request_q_outpointer = __constant_cpu_to_le16(0); - icb->response_q_inpointer = __constant_cpu_to_le16(0); + icb->request_q_outpointer = cpu_to_le16(0); + icb->response_q_inpointer = cpu_to_le16(0); icb->request_q_length = cpu_to_le16(req->length); icb->response_q_length = cpu_to_le16(rsp->length); icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); @@ -1777,9 +1778,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha) icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); - WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0); - WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0); - WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0); + WRT_REG_DWORD(&reg->req_q_out[0], 0); + WRT_REG_DWORD(&reg->rsp_q_in[0], 0); + WRT_REG_DWORD(&reg->rsp_q_out[0], 0); } static int @@ -1843,7 +1844,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha) ptab_desc = qla82xx_get_table_desc(unirom, QLA82XX_URI_DIR_SECT_PRODUCT_TBL); - if (!ptab_desc) + if (!ptab_desc) return -1; entries = cpu_to_le32(ptab_desc->num_entries); @@ -2298,7 +2299,7 @@ void qla82xx_init_flags(struct qla_hw_data *ha) ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } -inline void +static inline void qla82xx_set_idc_version(scsi_qla_host_t *vha) { int idc_ver; @@ -2481,14 +2482,12 @@ try_blob_fw: ql_log(ql_log_info, vha, 0x00a5, "Firmware loaded successfully from binary blob.\n"); return QLA_SUCCESS; - } else { - ql_log(ql_log_fatal, vha, 0x00a6, - "Firmware load failed for binary blob.\n"); - blob->fw = NULL; - blob = NULL; - goto fw_load_failed; } - return QLA_SUCCESS; + + ql_log(ql_log_fatal, vha, 0x00a6, + "Firmware load failed for binary blob.\n"); + blob->fw = NULL; + blob = NULL; fw_load_failed: return QLA_FUNCTION_FAILED; @@ -2549,7 +2548,7 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, "Do ROM fast read failed.\n"); goto done_read; } - dwptr[i] = __constant_cpu_to_le32(val); + dwptr[i] = cpu_to_le32(val); } done_read: return dwptr; @@ -2671,7 +2670,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, { int ret; uint32_t liter; - uint32_t sec_mask, rest_addr; + uint32_t rest_addr; dma_addr_t optrom_dma; void *optrom = NULL; int page_mode = 0; @@ -2693,7 +2692,6 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, } rest_addr = ha->fdt_block_size - 1; - sec_mask = ~rest_addr; ret = qla82xx_unprotect_flash(ha); if (ret) { @@ -2789,7 +2787,6 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - struct device_reg_82xx __iomem *reg; uint32_t dbval; /* Adjust ring index.
*/ @@ -2800,18 +2797,16 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha) } else req->ring_ptr++; - reg = &ha->iobase->isp82; dbval = 0x04 | (ha->portnum << 5); dbval = dbval | (req->id << 8) | (req->ring_index << 16); if (ql2xdbwr) - qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); + qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval); else { - WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval); + WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); - while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) { - WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, - dbval); + while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { + WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); } } @@ -3842,8 +3837,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, loop_cnt = ocm_hdr->op_count; for (i = 0; i < loop_cnt; i++) { - r_value = RD_REG_DWORD((void __iomem *) - (r_addr + ha->nx_pcibase)); + r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } diff --git a/kernel/drivers/scsi/qla2xxx/qla_nx2.c b/kernel/drivers/scsi/qla2xxx/qla_nx2.c index ed4d6b6b5..007192d7b 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_nx2.c +++ b/kernel/drivers/scsi/qla2xxx/qla_nx2.c @@ -397,11 +397,11 @@ qla8044_idc_lock(struct qla_hw_data *ha) * has the lock, wait for 2secs * and retry */ - ql_dbg(ql_dbg_p3p, vha, 0xb08a, - "%s: IDC lock Recovery by %d " - "failed, Retrying timeout\n", __func__, - ha->portnum); - timeout = 0; + ql_dbg(ql_dbg_p3p, vha, 0xb08a, + "%s: IDC lock Recovery by %d " + "failed, Retrying timeout\n", __func__, + ha->portnum); + timeout = 0; } } msleep(QLA8044_DRV_LOCK_MSLEEP); @@ -462,12 +462,11 @@ qla8044_flash_lock(scsi_qla_host_t *vha) static void qla8044_flash_unlock(scsi_qla_host_t *vha) { - int ret_val; struct qla_hw_data *ha = vha->hw; /* Reading FLASH_UNLOCK register unlocks the Flash */ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF); - ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK); + qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK); } @@ -561,7 +560,7 @@ qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, return buf; } -inline int +static inline int qla8044_need_reset(struct scsi_qla_host *vha) { uint32_t drv_state, drv_active; @@ -1130,9 +1129,9 @@ qla8044_ms_mem_write_128b(struct scsi_qla_host *vha, } for (i = 0; i < count; i++, addr += 16) { - if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET, + if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET, QLA8044_ADDR_QDR_NET_MAX)) || - (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET, + (addr_in_range(addr, QLA8044_ADDR_DDR_NET, QLA8044_ADDR_DDR_NET_MAX)))) { ret_val = QLA_FUNCTION_FAILED; goto exit_ms_mem_write_unlock; @@ -1605,7 +1604,7 @@ qla8044_set_idc_dontreset(struct scsi_qla_host *vha) qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); } -inline void +static inline void qla8044_set_rst_ready(struct scsi_qla_host *vha) { uint32_t drv_state; @@ -2992,7 +2991,7 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, uint32_t addr1, addr2, value, data, temp, wrVal; uint8_t stride, stride2; uint16_t count; - uint32_t poll, mask, data_size, modify_mask; + uint32_t poll, mask, modify_mask; uint32_t wait_count = 0; uint32_t *data_ptr = *d_ptr; @@ -3009,7 +3008,6 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, poll = rddfe->poll; mask = rddfe->mask; modify_mask = rddfe->modify_mask; - data_size = rddfe->data_size; addr2 = addr1 + stride; @@ -3091,7 +3089,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, uint8_t stride1, stride2; uint32_t 
addr3, addr4, addr5, addr6, addr7; uint16_t count, loop_cnt; - uint32_t poll, mask; + uint32_t mask; uint32_t *data_ptr = *d_ptr; struct qla8044_minidump_entry_rdmdio *rdmdio; @@ -3105,7 +3103,6 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, stride2 = rdmdio->stride_2; count = rdmdio->count; - poll = rdmdio->poll; mask = rdmdio->mask; value2 = rdmdio->value_2; @@ -3141,8 +3138,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, goto error; addr7 = addr2 - (4 * stride1); - data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, - mask, addr7); + data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7); if (data == -1) goto error; @@ -3165,7 +3161,7 @@ error: static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { - uint32_t addr1, addr2, value1, value2, poll, mask, r_value; + uint32_t addr1, addr2, value1, value2, poll, r_value; uint32_t wait_count = 0; struct qla8044_minidump_entry_pollwr *pollwr_hdr; @@ -3176,7 +3172,6 @@ static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, value2 = pollwr_hdr->value_2; poll = pollwr_hdr->poll; - mask = pollwr_hdr->mask; while (wait_count < poll) { qla8044_rd_reg_indirect(vha, addr1, &r_value); diff --git a/kernel/drivers/scsi/qla2xxx/qla_nx2.h b/kernel/drivers/scsi/qla2xxx/qla_nx2.h index ada36057d..02fe3c4cd 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_nx2.h +++ b/kernel/drivers/scsi/qla2xxx/qla_nx2.h @@ -58,8 +58,10 @@ #define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff) /* PCI Windowing for DDR regions. */ -#define QLA8044_ADDR_IN_RANGE(addr, low, high) \ - (((addr) <= (high)) && ((addr) >= (low))) +static inline bool addr_in_range(u64 addr, u64 low, u64 high) +{ + return addr <= high && addr >= low; +} /* Indirectly Mapped Registers */ #define QLA8044_FLASH_SPI_STATUS 0x2808E010 diff --git a/kernel/drivers/scsi/qla2xxx/qla_os.c b/kernel/drivers/scsi/qla2xxx/qla_os.c index 7462dd70b..fc6674db4 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_os.c +++ b/kernel/drivers/scsi/qla2xxx/qla_os.c @@ -267,7 +267,6 @@ struct scsi_host_template qla2xxx_driver_template = { .shost_attrs = qla2x00_host_attrs, .supported_mode = MODE_INITIATOR, - .use_blk_tags = 1, .track_queue_depth = 1, }; @@ -398,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) int cnt; for (cnt = 0; cnt < ha->max_req_queues; cnt++) { + if (!test_bit(cnt, ha->req_qid_map)) + continue; + req = ha->req_q_map[cnt]; qla2x00_free_req_que(ha, req); } @@ -405,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) ha->req_q_map = NULL; for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { + if (!test_bit(cnt, ha->rsp_qid_map)) + continue; + rsp = ha->rsp_q_map[cnt]; qla2x00_free_rsp_que(ha, rsp); } @@ -656,7 +661,7 @@ qla2x00_sp_compl(void *data, void *ptr, int res) "SP reference-count to ZERO -- sp=%p cmd=%p.\n", sp, GET_CMD_SP(sp)); if (ql2xextended_error_logging & ql_dbg_io) - BUG(); + WARN_ON(atomic_read(&sp->ref_count) == 0); return; } if (!atomic_dec_and_test(&sp->ref_count)) @@ -958,8 +963,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } ql_dbg(ql_dbg_taskm, vha, 0x8002, - "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n", - vha->host_no, id, lun, sp, cmd); + "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", + vha->host_no, id, lun, sp, cmd, sp->handle); /* Get a reference to the sp and drop the lock.*/ sp_get(sp); @@ -967,14 +972,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) spin_unlock_irqrestore(&ha->hardware_lock, flags); 
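/*
 * Aside (editorial sketch, not part of the patch above): the qla_nx2.h hunk
 * replaces the QLA8044_ADDR_IN_RANGE() macro with a static inline
 * addr_in_range() helper, which gives the arguments a real type (u64) and
 * evaluates each argument exactly once. A minimal userspace sketch
 * contrasting the two forms (names are illustrative assumptions):
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ADDR_IN_RANGE_MACRO(addr, low, high) \
    (((addr) <= (high)) && ((addr) >= (low)))

static inline bool addr_in_range(uint64_t addr, uint64_t low, uint64_t high)
{
    return addr <= high && addr >= low;
}

int main(void)
{
    uint64_t a = 0x1000;

    /* Same answer for a plain argument. */
    assert(ADDR_IN_RANGE_MACRO(a, 0x0, 0x2000) == addr_in_range(a, 0x0, 0x2000));

    /* The macro expands a side-effecting argument twice; the inline helper
     * evaluates it once. */
    uint64_t m = 0, f = 0;
    (void)ADDR_IN_RANGE_MACRO((m++, a), 0x0, 0x2000);
    (void)addr_in_range((f++, a), 0x0, 0x2000);
    assert(m == 2 && f == 1);
    return 0;
}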
rval = ha->isp_ops->abort_command(sp); if (rval) { - if (rval == QLA_FUNCTION_PARAMETER_ERROR) { - /* - * Decrement the ref_count since we can't find the - * command - */ - atomic_dec(&sp->ref_count); + if (rval == QLA_FUNCTION_PARAMETER_ERROR) ret = SUCCESS; - } else + else ret = FAILED; ql_dbg(ql_dbg_taskm, vha, 0x8003, @@ -986,12 +986,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } spin_lock_irqsave(&ha->hardware_lock, flags); - /* - * Clear the slot in the oustanding_cmds array if we can't find the - * command to reclaim the resources. - */ - if (rval == QLA_FUNCTION_PARAMETER_ERROR) - vha->req->outstanding_cmds[sp->handle] = NULL; sp->done(ha, sp, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2219,6 +2213,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; + case PCI_DEVICE_ID_QLOGIC_ISP2261: + ha->device_type |= DT_ISP2261; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; } if (IS_QLA82XX(ha)) @@ -2296,7 +2297,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || - pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) { + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; ql_dbg_pci(ql_dbg_init, pdev, 0x0007, @@ -2504,6 +2506,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; @@ -2973,7 +2976,6 @@ qla2x00_shutdown(struct pci_dev *pdev) static void qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) { - struct Scsi_Host *scsi_host; scsi_qla_host_t *vha; unsigned long flags; @@ -2984,7 +2986,7 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) BUG_ON(base_vha->list.next == &ha->vp_list); /* This assumes first entry in ha->vp_list is always base vha */ vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); - scsi_host = scsi_host_get(vha->host); + scsi_host_get(vha->host); spin_unlock_irqrestore(&ha->vport_slock, flags); mutex_unlock(&ha->vport_lock); @@ -3229,11 +3231,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, spin_lock_irqsave(vha->host->host_lock, flags); fcport->drport = rport; spin_unlock_irqrestore(vha->host->host_lock, flags); + qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen); set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); } else { - fc_remote_port_delete(rport); - qlt_fc_port_deleted(vha, fcport); + int now; + if (rport) + fc_remote_port_delete(rport); + qlt_do_generation_tick(vha, &now); + qlt_fc_port_deleted(vha, fcport, now); } } @@ -3270,9 +3276,10 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, if (!do_login) return; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + if (fcport->login_retry == 0) { fcport->login_retry = vha->hw->login_retry_count; - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); ql_dbg(ql_dbg_disc, vha, 0x2067, "Port login retry %8phN, 
id = 0x%04x retry cnt=%d.\n", @@ -3763,8 +3770,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->vp_fcports); INIT_LIST_HEAD(&vha->work_list); INIT_LIST_HEAD(&vha->list); + INIT_LIST_HEAD(&vha->qla_cmd_list); + INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); spin_lock_init(&vha->work_lock); + spin_lock_init(&vha->cmd_list_lock); sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, @@ -4418,7 +4428,10 @@ retry_lock2: void qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) { - uint16_t options = (requester_id << 15) | BIT_7, retry; +#if 0 + uint16_t options = (requester_id << 15) | BIT_7; +#endif + uint16_t retry; uint32_t data; struct qla_hw_data *ha = base_vha->hw; @@ -4454,6 +4467,7 @@ retry_unlock: return; +#if 0 /* XXX: IDC-unlock implementation using access-control mbx */ retry = 0; retry_unlock2: @@ -4469,6 +4483,7 @@ retry_unlock2: } return; +#endif } int @@ -4788,7 +4803,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) static int qla2x00_do_dpc(void *data) { - int rval; scsi_qla_host_t *base_vha; struct qla_hw_data *ha; @@ -5020,7 +5034,7 @@ loop_resync_check: if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &base_vha->dpc_flags))) { - rval = qla2x00_loop_resync(base_vha); + qla2x00_loop_resync(base_vha); clear_bit(LOOP_RESYNC_ACTIVE, &base_vha->dpc_flags); @@ -5704,6 +5718,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); diff --git a/kernel/drivers/scsi/qla2xxx/qla_sup.c b/kernel/drivers/scsi/qla2xxx/qla_sup.c index 028e8c8a7..3272ed5bb 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_sup.c +++ b/kernel/drivers/scsi/qla2xxx/qla_sup.c @@ -316,7 +316,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base, - __constant_cpu_to_le16(0x1234), 100000); + cpu_to_le16(0x1234), 100000); wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); if (stat != QLA_SUCCESS || wprot != 0x1234) { /* Write enable. 
*/ @@ -691,9 +691,9 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) region = (struct qla_flt_region *)&flt[1]; ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring, flt_addr << 2, OPTROM_BURST_SIZE); - if (*wptr == __constant_cpu_to_le16(0xffff)) + if (*wptr == cpu_to_le16(0xffff)) goto no_flash_data; - if (flt->version != __constant_cpu_to_le16(1)) { + if (flt->version != cpu_to_le16(1)) { ql_log(ql_log_warn, vha, 0x0047, "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), @@ -892,7 +892,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) fdt = (struct qla_fdt_layout *)req->ring; ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring, ha->flt_region_fdt << 2, OPTROM_BURST_SIZE); - if (*wptr == __constant_cpu_to_le16(0xffff)) + if (*wptr == cpu_to_le16(0xffff)) goto no_flash_data; if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || fdt->sig[3] != 'D') @@ -991,7 +991,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha) ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring, QLA82XX_IDC_PARAM_ADDR , 8); - if (*wptr == __constant_cpu_to_le32(0xffffffff)) { + if (*wptr == cpu_to_le32(0xffffffff)) { ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT; } else { @@ -1051,9 +1051,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); - if (hdr.version == __constant_cpu_to_le16(0xffff)) + if (hdr.version == cpu_to_le16(0xffff)) return; - if (hdr.version != __constant_cpu_to_le16(1)) { + if (hdr.version != cpu_to_le16(1)) { ql_dbg(ql_dbg_user, vha, 0x7090, "Unsupported NPIV-Config " "detected: version=0x%x entries=0x%x checksum=0x%x.\n", @@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha) { uint32_t led_select_value = 0; - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) goto out; if (ha->port_no == 0) diff --git a/kernel/drivers/scsi/qla2xxx/qla_target.c b/kernel/drivers/scsi/qla2xxx/qla_target.c index 496a733d0..75514a15b 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_target.c +++ b/kernel/drivers/scsi/qla2xxx/qla_target.c @@ -114,6 +114,10 @@ static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull); static void qlt_disable_vha(struct scsi_qla_host *vha); static void qlt_clear_tgt_db(struct qla_tgt *tgt); +static void qlt_send_notify_ack(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy, + uint32_t add_flags, uint16_t resp_code, int resp_code_valid, + uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); /* * Global Variables */ @@ -123,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); +/* This API intentionally takes dest as a parameter, rather than returning + * int value to avoid caller forgetting to issue wmb() after the store */ +void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); + *dest = atomic_inc_return(&base_vha->generation_tick); + /* memory barrier */ + wmb(); +} + /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ static struct qla_tgt_sess *qlt_find_sess_by_port_name( struct qla_tgt *tgt, @@ -382,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work) struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = 
sess->vha; struct qla_hw_data *ha = vha->hw; + unsigned long flags; + bool logout_started = false; + fc_port_t fcport; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, + "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" + " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n", + __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + sess->logout_on_delete, sess->keep_nport_handle, + sess->plogi_ack_needed); BUG_ON(!tgt); + + if (sess->logout_on_delete) { + int rc; + + memset(&fcport, 0, sizeof(fcport)); + fcport.loop_id = sess->loop_id; + fcport.d_id = sess->s_id; + memcpy(fcport.port_name, sess->port_name, WWN_SIZE); + fcport.vha = vha; + fcport.tgt_session = sess; + + rc = qla2x00_post_async_logout_work(vha, &fcport, NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule logo failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } + /* * Release the target session for FC Nexus from fabric module code. */ if (sess->se_sess != NULL) ha->tgt.tgt_ops->free_session(sess); + if (logout_started) { + bool traced = false; + + while (!ACCESS_ONCE(sess->logout_completed)) { + if (!traced) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, + "%s: waiting for sess %p logout\n", + __func__, sess); + traced = true; + } + msleep(100); + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087, + "%s: sess %p logout completed\n", + __func__, sess); + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + + if (sess->plogi_ack_needed) + qlt_send_notify_ack(vha, &sess->tm_iocb, + 0, 0, 0, 0, 0, 0); + + list_del(&sess->sess_list_entry); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, "Unregistration of sess %p finished\n", sess); @@ -410,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - list_del(&sess->sess_list_entry); - if (sess->deleted) - list_del(&sess->del_list_entry); + if (!list_empty(&sess->del_list_entry)) + list_del_init(&sess->del_list_entry); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); @@ -490,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, struct qla_tgt *tgt = sess->tgt; uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; - if (sess->deleted) - return; + if (sess->deleted) { + /* Upgrade to unconditional deletion in case it was temporary */ + if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING) + list_del(&sess->del_list_entry); + else + return; + } ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, "Scheduling sess %p for deletion\n", sess); - list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); - sess->deleted = 1; - if (immediate) + if (immediate) { dev_loss_tmo = 0; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + list_add(&sess->del_list_entry, &tgt->del_sess_list); + } else { + sess->deleted = QLA_SESS_DELETION_PENDING; + list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); + } sess->expires = jiffies + dev_loss_tmo * HZ; ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " - "deletion in %u secs (expires: %lu) immed: %d\n", - sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, - sess->expires, immediate); + "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)" + " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n", + sess->vha->vp_idx, 
sess->port_name, sess->loop_id, + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete, + sess->generation); if (immediate) - schedule_delayed_work(&tgt->sess_del_work, 0); + mod_delayed_work(system_wq, &tgt->sess_del_work, 0); else schedule_delayed_work(&tgt->sess_del_work, sess->expires - jiffies); @@ -579,9 +663,9 @@ out_free_id_list: /* ha->hardware_lock supposed to be held on entry */ static void qlt_undelete_sess(struct qla_tgt_sess *sess) { - BUG_ON(!sess->deleted); + BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); - list_del(&sess->del_list_entry); + list_del_init(&sess->del_list_entry); sess->deleted = 0; } @@ -600,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) del_list_entry); elapsed = jiffies; if (time_after_eq(elapsed, sess->expires)) { - qlt_undelete_sess(sess); + /* No turning back */ + list_del_init(&sess->del_list_entry); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, "Timeout: sess %p about to be deleted\n", @@ -644,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess( fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); + /* Cannot undelete at this point */ + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + return NULL; + } + if (sess->deleted) qlt_undelete_sess(sess); @@ -653,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess( if (sess->local && !local) sess->local = 0; + + qlt_do_generation_tick(vha, &sess->generation); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return sess; @@ -674,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess( sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; sess->local = local; + INIT_LIST_HEAD(&sess->del_list_entry); + + /* Under normal circumstances we want to logout from firmware when + * session eventually ends and release corresponding nport handle. + * In the exception cases (e.g. when new PLOGI is waiting) corresponding + * code will adjust these flags as necessary. 
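The deletion path above is driven by a per-host generation counter: qlt_do_generation_tick() advances base_vha->generation_tick, a session records the tick at which it was (re)registered, a deletion request carries the tick at which it was issued, and qlt_fc_port_deleted() drops a request whose generation is older than the session's. A minimal stand-alone sketch of that stale-request test (illustrative names only, not driver code):

#include <stdio.h>

static int generation_tick;

/* Each update advances the counter; callers store the returned value. */
static int generation_tick_sketch(void)
{
	return ++generation_tick;
}

/*
 * Mirrors the "max_gen - sess->generation < 0" test above: a deletion
 * request is stale if it was issued before the session's current
 * generation, i.e. the session has been re-created since.
 */
static int deletion_is_stale(int req_gen, int sess_gen)
{
	return (req_gen - sess_gen) < 0;
}

int main(void)
{
	int req_gen = generation_tick_sketch();  /* deletion scheduled ...      */
	int sess_gen = generation_tick_sketch(); /* ... session re-created later */

	printf("stale=%d\n", deletion_is_stale(req_gen, sess_gen)); /* stale=1 */
	return 0;
}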
*/ + sess->logout_on_delete = 1; + sess->keep_nport_handle = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", @@ -706,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess( spin_lock_irqsave(&ha->hardware_lock, flags); list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); vha->vha_tgt.qla_tgt->sess_count++; + qlt_do_generation_tick(vha, &sess->generation); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, @@ -719,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess( } /* - * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() + * Called from qla2x00_reg_remote_port() */ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) { @@ -751,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); + } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + /* Point of no return */ + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; } else { kref_get(&sess->se_sess->sess_kref); @@ -781,7 +890,12 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) spin_unlock_irqrestore(&ha->hardware_lock, flags); } -void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) +/* + * max_gen - specifies maximum session generation + * at which this deletion requestion is still valid + */ +void +qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; @@ -800,6 +914,15 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) return; } + if (max_gen - sess->generation < 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, + "Ignoring stale deletion request for se_sess %p / sess %p" + " for port %8phC, req_gen %d, sess_gen %d\n", + sess->se_sess, sess, sess->port_name, max_gen, + sess->generation); + return; + } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); sess->local = 1; @@ -1018,7 +1141,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & - __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); } nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; @@ -1076,7 +1199,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, resp->sof_type = abts->sof_type; resp->exchange_address = abts->exchange_address; resp->fcp_hdr_le = abts->fcp_hdr_le; - f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | + f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | F_CTL_LAST_SEQ | F_CTL_END_SEQ | F_CTL_SEQ_INITIATIVE); p = (uint8_t *)&f_ctl; @@ -1151,15 +1274,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, ctio->entry_count = 1; ctio->nport_handle = entry->nport_handle; ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; - ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio->vp_index = vha->vp_idx; ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; ctio->exchange_addr = entry->exchange_addr_to_abort; - ctio->u.status1.flags = - 
__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | - CTIO7_FLAGS_TERMINATE); + ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | + CTIO7_FLAGS_TERMINATE); ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id); /* Memory Barrier */ @@ -1170,6 +1292,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, FCP_TMF_CMPL, true); } +static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + + spin_lock(&vha->cmd_list_lock); + + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + if (tag == op->atio.u.isp24.exchange_addr) { + op->aborted = true; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + if (tag == cmd->atio.u.isp24.exchange_addr) { + cmd->state = QLA_TGT_STATE_ABORTED; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + + spin_unlock(&vha->cmd_list_lock); + return 0; +} + +/* drop cmds for the given lun + * XXX only looks for cmds on the port through which lun reset was recieved + * XXX does not go through the list of other port (which may have cmds + * for the same lun) + */ +static void abort_cmds_for_lun(struct scsi_qla_host *vha, + uint32_t lun, uint8_t *s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + + key = sid_to_key(s_id); + spin_lock(&vha->cmd_list_lock); + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + uint32_t op_key; + uint32_t op_lun; + + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + op_lun = scsilun_to_int( + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); + if (op_key == key && op_lun == lun) + op->aborted = true; + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key; + uint32_t cmd_lun; + + cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + cmd_lun = scsilun_to_int( + (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); + if (cmd_key == key && cmd_lun == lun) + cmd->state = QLA_TGT_STATE_ABORTED; + } + spin_unlock(&vha->cmd_list_lock); +} + /* ha->hardware_lock supposed to be held on entry */ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) @@ -1186,7 +1372,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - if (cmd->tag == abts->exchange_addr_to_abort) { + if (se_cmd->tag == abts->exchange_addr_to_abort) { lun = cmd->unpacked_lun; found_lun = true; break; @@ -1194,8 +1380,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, } spin_unlock(&se_sess->sess_cmd_lock); - if (!found_lun) - return -ENOENT; + /* cmd not in LIO lists, look in qla list */ + if (!found_lun) { + if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { + /* send TASK_ABORT response immediately */ + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false); + return 0; + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081, + "unable to find cmd in driver or LIO for tag 0x%x\n", + abts->exchange_addr_to_abort); + return -ENOENT; + } + } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, "qla_target(%d): task abort (tag=%d)\n", @@ -1279,6 +1476,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, return; } + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + return; + } + rc = __qlt_24xx_handle_abts(vha, 
abts, sess); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, @@ -1319,20 +1521,19 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, ctio->entry_count = 1; ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; ctio->nport_handle = mcmd->sess->loop_id; - ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio->vp_index = ha->vp_idx; ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio->exchange_addr = atio->u.isp24.exchange_addr; ctio->u.status1.flags = (atio->u.isp24.attr << 9) | - __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | - CTIO7_FLAGS_SEND_STATUS); + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = - __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); - ctio->u.status1.response_len = __constant_cpu_to_le16(8); + cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); + ctio->u.status1.response_len = cpu_to_le16(8); ctio->u.status1.sense_data[0] = resp_code; /* Memory Barrier */ @@ -1583,7 +1784,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = prm->cmd->loop_id; - pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; @@ -1721,21 +1922,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd = &cmd->se_cmd; - if (unlikely(cmd->aborted)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, - "qla_target(%d): terminating exchange " - "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd, - se_cmd, cmd->tag); - - cmd->state = QLA_TGT_STATE_ABORTED; - cmd->cmd_flags |= BIT_6; - - qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); - - /* !! At this point cmd could be already freed !! */ - return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; - } - prm->cmd = cmd; prm->tgt = tgt; prm->rq_result = scsi_status; @@ -1760,18 +1946,17 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { prm->residual = se_cmd->residual_count; ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c, - "Residual underflow: %d (tag %d, " - "op %x, bufflen %d, rq_result %x)\n", prm->residual, - cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, - cmd->bufflen, prm->rq_result); + "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", + prm->residual, se_cmd->tag, + se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, + cmd->bufflen, prm->rq_result); prm->rq_result |= SS_RESIDUAL_UNDER; } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { prm->residual = se_cmd->residual_count; ql_dbg(ql_dbg_io, vha, 0x305d, - "Residual overflow: %d (tag %d, " - "op %x, bufflen %d, rq_result %x)\n", prm->residual, - cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, - cmd->bufflen, prm->rq_result); + "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", + prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 
+ se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); prm->rq_result |= SS_RESIDUAL_OVER; } @@ -1844,7 +2029,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) == 50) { *xmit_type &= ~QLA_TGT_XMIT_STATUS; ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, - "Dropping cmd %p (tag %d) status", cmd, cmd->tag); + "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag); } #endif /* @@ -1868,7 +2053,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, "Cutting cmd %p (tag %d) buffer" " tail to len %d, sg_cnt %d (cmd->bufflen %d," - " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave, + " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave, cmd->bufflen, cmd->sg_cnt); cmd->bufflen = tot_len; @@ -1880,13 +2065,13 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, "Cutting cmd %p (tag %d) buffer head " - "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset, + "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset, cmd->bufflen); if (offset == 0) *xmit_type &= ~QLA_TGT_XMIT_DATA; else if (qlt_set_data_offset(cmd, offset)) { ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, - "qlt_set_data_offset() failed (tag %d)", cmd->tag); + "qlt_set_data_offset() failed (tag %d)", se_cmd->tag); } } } @@ -1900,10 +2085,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, { prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, (uint32_t)sizeof(ctio->u.status1.sense_data)); - ctio->u.status0.flags |= - __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); + ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { - ctio->u.status0.flags |= __constant_cpu_to_le16( + ctio->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); } @@ -1920,17 +2104,17 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, "non GOOD status\n"); goto skip_explict_conf; } - ctio->u.status1.flags |= __constant_cpu_to_le16( + ctio->u.status1.flags |= cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); } skip_explict_conf: ctio->u.status1.flags &= - ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); + ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); ctio->u.status1.flags |= - __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); ctio->u.status1.scsi_status |= - __constant_cpu_to_le16(SS_SENSE_LEN_VALID); + cpu_to_le16(SS_SENSE_LEN_VALID); ctio->u.status1.sense_length = cpu_to_le16(prm->sense_buffer_len); for (i = 0; i < prm->sense_buffer_len/4; i++) @@ -1950,9 +2134,9 @@ skip_explict_conf: #endif } else { ctio->u.status1.flags &= - ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); + ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); ctio->u.status1.flags |= - __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); ctio->u.status1.sense_length = 0; memset(ctio->u.status1.sense_data, 0, sizeof(ctio->u.status1.sense_data)); @@ -2074,7 +2258,6 @@ static inline int qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) { uint32_t *cur_dsd; - int sgc; uint32_t transfer_length = 0; uint32_t data_bytes; uint32_t dif_bytes; @@ -2091,7 +2274,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) struct atio_from_isp *atio = &prm->cmd->atio; uint16_t t16; - sgc = 0; ha = vha->hw; pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; @@ -2181,7 +2363,7 @@ 
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = prm->cmd->loop_id; - pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; @@ -2197,9 +2379,9 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) /* Set transfer direction */ if (cmd->dma_data_direction == DMA_TO_DEVICE) - pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN); + pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); else if (cmd->dma_data_direction == DMA_FROM_DEVICE) - pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT); + pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); pkt->dseg_count = prm->tot_dsds; @@ -2251,11 +2433,11 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); - crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); + crc_ctx_pkt->guard_seed = cpu_to_le16(0); /* Walks data segments */ - pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR); + pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); if (!bundling && prm->prot_seg_cnt) { if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, @@ -2298,6 +2480,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, unsigned long flags = 0; int res; + spin_lock_irqsave(&ha->hardware_lock, flags); + if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + cmd->state = QLA_TGT_STATE_PROCESSED; + if (cmd->sess->logout_completed) + /* no need to terminate. FW already freed exchange. 
*/ + qlt_abort_cmd_on_host_reset(cmd->vha, cmd); + else + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + memset(&prm, 0, sizeof(prm)); qlt_check_srr_debug(cmd, &xmit_type); @@ -2310,9 +2505,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); if (unlikely(res != 0)) { - if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) - return 0; - return res; } @@ -2351,7 +2543,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { pkt->u.status0.flags |= - __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | + cpu_to_le16(CTIO7_FLAGS_DATA_IN | CTIO7_FLAGS_STATUS_MODE_0); if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) @@ -2363,11 +2555,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, cpu_to_le16(prm.rq_result); pkt->u.status0.residual = cpu_to_le32(prm.residual); - pkt->u.status0.flags |= __constant_cpu_to_le16( + pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_SEND_STATUS); if (qlt_need_explicit_conf(ha, cmd, 0)) { pkt->u.status0.flags |= - __constant_cpu_to_le16( + cpu_to_le16( CTIO7_FLAGS_EXPLICIT_CONFORM | CTIO7_FLAGS_CONFORM_REQ); } @@ -2395,12 +2587,12 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, ctio->entry_count = 1; ctio->entry_type = CTIO_TYPE7; ctio->dseg_count = 0; - ctio->u.status1.flags &= ~__constant_cpu_to_le16( + ctio->u.status1.flags &= ~cpu_to_le16( CTIO7_FLAGS_DATA_IN); /* Real finish is ctio_m1's finish */ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; - pkt->u.status0.flags |= __constant_cpu_to_le16( + pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_DONT_RET_CTIO); /* qlt_24xx_init_ctio_to_isp will correct @@ -2459,7 +2651,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { + if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) || + (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { /* * Either a chip reset is active or this request was from * previous life, just abort the processing. 
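The guards added to qlt_xmit_response() and qlt_rdy_to_xfer() above reduce to one predicate: drop the command if a chip reset happened after it was captured, or if its session's deletion is already in progress. A self-contained sketch of that check (assumed structure names, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

enum sess_state { SESS_ACTIVE, SESS_DELETION_PENDING, SESS_DELETION_IN_PROGRESS };

struct sess { enum sess_state deleted; };
struct hw   { unsigned int chip_reset; bool reset_active; };
struct cmd  { unsigned int reset_count; struct sess *sess; };

static bool must_abort_processing(const struct hw *hw, const struct cmd *cmd)
{
	if (hw->reset_active || cmd->reset_count != hw->chip_reset)
		return true;	/* command is from a previous chip life */
	if (cmd->sess && cmd->sess->deleted == SESS_DELETION_IN_PROGRESS)
		return true;	/* initiator is already being logged out */
	return false;
}

int main(void)
{
	struct hw hw = { .chip_reset = 2, .reset_active = false };
	struct sess s = { .deleted = SESS_ACTIVE };
	struct cmd c = { .reset_count = 1, .sess = &s };

	printf("%d\n", must_abort_processing(&hw, &c));	/* 1: stale reset_count */
	return 0;
}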
@@ -2489,7 +2682,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) } pkt = (struct ctio7_to_24xx *)prm.pkt; - pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | + pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | CTIO7_FLAGS_STATUS_MODE_0); if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) @@ -2564,7 +2757,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, /* Update protection tag */ if (cmd->prot_sg_cnt) { - uint32_t i, j = 0, k = 0, num_ent; + uint32_t i, k = 0, num_ent; struct scatterlist *sg, *sgl; @@ -2577,7 +2770,6 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, k += num_ent; continue; } - j = blocks_done - k - 1; k = blocks_done; break; } @@ -2650,6 +2842,89 @@ out: } +/* If hardware_lock held on entry, might drop it, then reaquire */ +/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ +static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy) +{ + struct nack_to_isp *nack; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + int ret = 0; + + ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, + "Sending TERM ELS CTIO (ha=%p)\n", ha); + + pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); + if (pkt == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe080, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return -ENOMEM; + } + + pkt->entry_type = NOTIFY_ACK_TYPE; + pkt->entry_count = 1; + pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + + nack = (struct nack_to_isp *)pkt; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + + /* terminate */ + nack->u.isp24.flags |= + __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); + + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + qla2x00_start_iocbs(vha, vha->req); + return ret; +} + +static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *imm, int ha_locked) +{ + unsigned long flags = 0; + int rc; + + if (qlt_issue_marker(vha, ha_locked) < 0) + return; + + if (ha_locked) { + rc = __qlt_send_term_imm_notif(vha, imm); + +#if 0 /* Todo */ + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#endif + goto done; + } + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + rc = __qlt_send_term_imm_notif(vha, imm); + +#if 0 /* Todo */ + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#endif + +done: + if (!ha_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} + /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, @@ -2688,14 +2963,14 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; ctio24->nport_handle = cmd ? 
cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; - ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | - __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | + cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.ox_id = cpu_to_le16(temp); @@ -2794,6 +3069,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) } +void qlt_abort_cmd(struct qla_tgt_cmd *cmd) +{ + struct qla_tgt *tgt = cmd->tgt; + struct scsi_qla_host *vha = tgt->vha; + struct se_cmd *se_cmd = &cmd->se_cmd; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, + "qla_target(%d): terminating exchange for aborted cmd=%p " + "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, + se_cmd->tag); + + cmd->state = QLA_TGT_STATE_ABORTED; + cmd->cmd_flags |= BIT_6; + + qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); +} +EXPORT_SYMBOL(qlt_abort_cmd); + void qlt_free_cmd(struct qla_tgt_cmd *cmd) { struct qla_tgt_sess *sess = cmd->sess; @@ -2917,7 +3210,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, if (ctio != NULL) { struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; term = !(c->flags & - __constant_cpu_to_le16(OF_TERM_EXCH)); + cpu_to_le16(OF_TERM_EXCH)); } else term = 1; @@ -3017,7 +3310,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dump_stack(); } - cmd->cmd_flags |= BIT_12; + cmd->cmd_flags |= BIT_17; ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3065,7 +3358,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, { struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd; - const struct target_core_fabric_ops *tfo; struct qla_tgt_cmd *cmd; if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { @@ -3083,7 +3375,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, return; se_cmd = &cmd->se_cmd; - tfo = se_cmd->se_tfo; cmd->cmd_sent_to_fw = 0; qlt_unmap_sg(vha, cmd); @@ -3179,23 +3470,21 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { - ; + cmd->cmd_flags |= BIT_12; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { - int rx_status = 0; - cmd->state = QLA_TGT_STATE_DATA_IN; - if (unlikely(status != CTIO_SUCCESS)) - rx_status = -EIO; - else + if (status == CTIO_SUCCESS) cmd->write_data_transferred = 1; ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->state == QLA_TGT_STATE_ABORTED) { + cmd->cmd_flags |= BIT_18; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, - "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); + "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { + cmd->cmd_flags |= BIT_19; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); @@ -3207,7 +3496,6 @@ skip_term: dump_stack(); } - ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3265,8 +3553,15 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) if (tgt->tgt_stop) goto out_term; + if (cmd->state == QLA_TGT_STATE_ABORTED) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, + "cmd with tag %u is aborted\n", + 
cmd->atio.u.isp24.exchange_addr); + goto out_term; + } + cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; - cmd->tag = atio->u.isp24.exchange_addr; + cmd->se_cmd.tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); @@ -3318,6 +3613,12 @@ out_term: static void qlt_do_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + scsi_qla_host_t *vha = cmd->vha; + unsigned long flags; + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&cmd->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); __qlt_do_work(cmd); } @@ -3369,14 +3670,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work) unsigned long flags; uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&op->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + if (op->aborted) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083, + "sess_op with tag %u is aborted\n", + op->atio.u.isp24.exchange_addr); + goto out_term; + } + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, - "qla_target(%d): Unable to find wwn login" - " (s_id %x:%x:%x), trying to create it manually\n", - vha->vp_idx, s_id[0], s_id[1], s_id[2]); + "qla_target(%d): Unable to find wwn login" + " (s_id %x:%x:%x), trying to create it manually\n", + vha->vp_idx, s_id[0], s_id[1], s_id[2]); if (op->atio.u.raw.entry_count > 1) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, - "Dropping multy entry atio %p\n", &op->atio); + "Dropping multy entry atio %p\n", &op->atio); goto out_term; } @@ -3441,10 +3753,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, memcpy(&op->atio, atio, sizeof(*atio)); op->vha = vha; + + spin_lock(&vha->cmd_list_lock); + list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list); + spin_unlock(&vha->cmd_list_lock); + INIT_WORK(&op->work, qlt_create_sess_from_atio); queue_work(qla_tgt_wq, &op->work); return 0; } + + /* Another WWN used to have our s_id. Our PLOGI scheduled its + * session deletion, but it's still in sess_del_work wq */ + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + ql_dbg(ql_dbg_io, vha, 0x3061, + "New command while old session %p is being deleted\n", + sess); + return -EFAULT; + } + /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 
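The cmd_list handling above exists so that aborts can reach commands still sitting on the qla_tgt_wq work queue and not yet known to the target core: each command (and each not-yet-bound sess_op) is linked onto a per-host list under cmd_list_lock when queued, looked up by exchange tag or s_id/LUN when an ABTS or LUN reset arrives, and unlinked by the work function before normal processing. A user-space sketch of that pattern (hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct qcmd {
	unsigned int tag;
	bool aborted;
	struct qcmd *next;
};

static pthread_mutex_t cmd_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qcmd *cmd_list;

static void cmd_list_add(struct qcmd *cmd)
{
	pthread_mutex_lock(&cmd_list_lock);
	cmd->next = cmd_list;
	cmd_list = cmd;
	pthread_mutex_unlock(&cmd_list_lock);
}

/* Returns true if a queued command with this tag was found and marked. */
static bool abort_cmd_for_tag_sketch(unsigned int tag)
{
	struct qcmd *c;
	bool found = false;

	pthread_mutex_lock(&cmd_list_lock);
	for (c = cmd_list; c; c = c->next) {
		if (c->tag == tag) {
			c->aborted = true;
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&cmd_list_lock);
	return found;
}

int main(void)
{
	struct qcmd c = { .tag = 0x1234 };

	cmd_list_add(&c);
	printf("aborted=%d\n", abort_cmd_for_tag_sketch(0x1234));	/* aborted=1 */
	return 0;
}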
*/ @@ -3460,6 +3787,11 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, cmd->cmd_in_wq = 1; cmd->cmd_flags |= BIT_0; + + spin_lock(&vha->cmd_list_lock); + list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); + spin_unlock(&vha->cmd_list_lock); + INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -3473,6 +3805,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; + struct atio_from_isp *a = (struct atio_from_isp *)iocb; int res; uint8_t tmr_func; @@ -3513,6 +3846,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); tmr_func = TMR_LUN_RESET; + abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); break; case QLA_TGT_CLEAR_TS: @@ -3582,12 +3916,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) struct qla_tgt *tgt; struct qla_tgt_sess *sess; uint32_t lun, unpacked_lun; - int lun_size, fn; + int fn; tgt = vha->vha_tgt.qla_tgt; lun = a->u.isp24.fcp_cmnd.lun; - lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, a->u.isp24.fcp_hdr.s_id); @@ -3601,6 +3934,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) sizeof(struct atio_from_isp)); } + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) + return -EFAULT; + return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); } @@ -3666,22 +4002,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha, return __qlt_abort_task(vha, iocb, sess); } +void qlt_logo_completion_handler(fc_port_t *fcport, int rc) +{ + if (fcport->tgt_session) { + if (rc != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, + "%s: se_sess %p / sess %p from" + " port %8phC loop_id %#04x s_id %02x:%02x:%02x" + " LOGO failed: %#x\n", + __func__, + fcport->tgt_session->se_sess, + fcport->tgt_session, + fcport->port_name, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, rc); + } + + fcport->tgt_session->logout_completed = 1; + } +} + +static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a, + struct imm_ntfy_from_isp *b) +{ + struct imm_ntfy_from_isp tmp; + memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp)); + memcpy(a, b, sizeof(struct imm_ntfy_from_isp)); + memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp)); +} + +/* +* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) +* +* Schedules sessions with matching port_id/loop_id but different wwn for +* deletion. Returns existing session with matching wwn if present. +* Null otherwise. 
+*/ +static struct qla_tgt_sess * +qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, + port_id_t port_id, uint16_t loop_id) +{ + struct qla_tgt_sess *sess = NULL, *other_sess; + uint64_t other_wwn; + + list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { + + other_wwn = wwn_to_u64(other_sess->port_name); + + if (wwn == other_wwn) { + WARN_ON(sess); + sess = other_sess; + continue; + } + + /* find other sess with nport_id collision */ + if (port_id.b24 == other_sess->s_id.b24) { + if (loop_id != other_sess->loop_id) { + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* + * logout_on_delete is set by default, but another + * session that has the same s_id/loop_id combo + * might have cleared it when requested this session + * deletion, so don't touch it + */ + qlt_schedule_sess_for_deletion(other_sess, true); + } else { + /* + * Another wwn used to have our s_id/loop_id + * combo - kill the session, but don't log out + */ + sess->logout_on_delete = 0; + qlt_schedule_sess_for_deletion(other_sess, + true); + } + continue; + } + + /* find other sess with nport handle collision */ + if (loop_id == other_sess->loop_id) { + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* Same loop_id but different s_id + * Ok to kill and logout */ + qlt_schedule_sess_for_deletion(other_sess, true); + } + } + + return sess; +} + +/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ +static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + int count = 0; + + key = (((u32)s_id->b.domain << 16) | + ((u32)s_id->b.area << 8) | + ((u32)s_id->b.al_pa)); + + spin_lock(&vha->cmd_list_lock); + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + if (op_key == key) { + op->aborted = true; + count++; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + if (cmd_key == key) { + cmd->state = QLA_TGT_STATE_ABORTED; + count++; + } + } + spin_unlock(&vha->cmd_list_lock); + + return count; +} + /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess = NULL; + uint64_t wwn; + port_id_t port_id; + uint16_t loop_id; + uint16_t wd3_lo; int res = 0; + wwn = wwn_to_u64(iocb->u.isp24.port_name); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); + /* res = 1 means ack at the end of thread + * res = 0 means ack async/later. 
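abort_cmds_for_s_id() and abort_cmds_for_lun() above compare initiators by packing the three S_ID bytes into a single integer, the same packing as the sid_to_key() helper added to qla_target.h. A stand-alone version of that packing (sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Pack the 3 S_ID bytes into one 24-bit key, s_id[0] most significant,
 * as sid_to_key() does, so "same initiator" checks are a single compare. */
static uint32_t sid_to_key_sketch(const uint8_t *s_id)
{
	return ((uint32_t)s_id[0] << 16) |
	       ((uint32_t)s_id[1] << 8) |
	        (uint32_t)s_id[2];
}

int main(void)
{
	const uint8_t s_id[3] = { 0x01, 0x02, 0x03 };

	printf("key=0x%06x\n", (unsigned)sid_to_key_sketch(s_id)); /* key=0x010203 */
	return 0;
}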
+ */ switch (iocb->u.isp24.status_subcode) { case ELS_PLOGI: - case ELS_FLOGI: + + /* Mark all stale commands in qla_tgt_wq for deletion */ + abort_cmds_for_s_id(vha, &port_id); + + if (wwn) + sess = qlt_find_sess_invalidate_other(tgt, wwn, + port_id, loop_id); + + if (!sess || IS_SW_RESV_ADDR(sess->s_id)) { + res = 1; + break; + } + + if (sess->plogi_ack_needed) { + /* + * Initiator sent another PLOGI before last PLOGI could + * finish. Swap plogi iocbs and terminate old one + * without acking, new one will get acked when session + * deletion completes. + */ + ql_log(ql_log_warn, sess->vha, 0xf094, + "sess %p received double plogi.\n", sess); + + qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb); + + qlt_send_term_imm_notif(vha, iocb, 1); + + res = 0; + break; + } + + res = 0; + + /* + * Save immediate Notif IOCB for Ack when sess is done + * and being deleted. + */ + memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb)); + sess->plogi_ack_needed = 1; + + /* + * Under normal circumstances we want to release nport handle + * during LOGO process to avoid nport handle leaks inside FW. + * The exception is when LOGO is done while another PLOGI with + * the same nport handle is waiting as might be the case here. + * Note: there is always a possibily of a race where session + * deletion has already started for other reasons (e.g. ACL + * removal) and now PLOGI arrives: + * 1. if PLOGI arrived in FW after nport handle has been freed, + * FW must have assigned this PLOGI a new/same handle and we + * can proceed ACK'ing it as usual when session deletion + * completes. + * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT + * bit reached it, the handle has now been released. We'll + * get an error when we ACK this PLOGI. Nothing will be sent + * back to initiator. Initiator should eventually retry + * PLOGI and situation will correct itself. + */ + sess->keep_nport_handle = ((sess->loop_id == loop_id) && + (sess->s_id.b24 == port_id.b24)); + qlt_schedule_sess_for_deletion(sess, true); + break; + case ELS_PRLI: + wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); + + if (wwn) + sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, + loop_id); + + if (sess != NULL) { + if (sess->deleted) { + /* + * Impatient initiator sent PRLI before last + * PLOGI could finish. Will force him to re-try, + * while last one finishes. + */ + ql_log(ql_log_warn, sess->vha, 0xf095, + "sess %p PRLI received, before plogi ack.\n", + sess); + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; + } + + /* + * This shouldn't happen under normal circumstances, + * since we have deleted the old session during PLOGI + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, + "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", + sess->loop_id, sess, iocb->u.isp24.nport_handle); + + sess->local = 0; + sess->loop_id = loop_id; + sess->s_id = port_id; + + if (wd3_lo & BIT_7) + sess->conf_compl_supported = 1; + + } + res = 1; /* send notify ack */ + + /* Make session global (not used in fabric mode) */ + if (ha->current_topology != ISP_CFG_F) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + /* todo: else - create sess here. 
*/ + res = 1; /* send notify ack */ + } + + break; + case ELS_LOGO: case ELS_PRLO: res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); @@ -3699,6 +4293,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, break; } + case ELS_FLOGI: /* should never happen */ default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, "qla_target(%d): Unsupported ELS command %x " @@ -3712,6 +4307,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) { +#if 1 + /* + * FIXME: Reject non zero SRR relative offset until we can test + * this code properly. + */ + pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); + return -1; +#else struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; size_t first_offset = 0, rem_offset = offset, tmp = 0; int i, sg_srr_cnt, bufflen = 0; @@ -3721,13 +4324,6 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) "cmd->sg_cnt: %u, direction: %d\n", cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - /* - * FIXME: Reject non zero SRR relative offset until we can test - * this code properly. - */ - pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); - return -1; - if (!cmd->sg || !cmd->sg_cnt) { ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, "Missing cmd->sg or zero cmd->sg_cnt in" @@ -3810,6 +4406,7 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) BUG(); return 0; +#endif } static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, @@ -3891,9 +4488,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha, resp = 1; } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, - "qla_target(%d): SRR for in data for cmd " - "without them (tag %d, SCSI status %d), " - "reject", vha->vp_idx, cmd->tag, + "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject", + vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status); goto out_reject; } @@ -3927,10 +4523,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha, } } else { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, - "qla_target(%d): SRR for out data for cmd " - "without them (tag %d, SCSI status %d), " - "reject", vha->vp_idx, cmd->tag, - cmd->se_cmd.scsi_status); + "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject", + vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status); goto out_reject; } break; @@ -3971,16 +4565,20 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; +#ifndef __CHECKER__ if (!ha_locked) spin_lock_irqsave(&ha->hardware_lock, flags); +#endif qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT, NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); +#ifndef __CHECKER__ if (!ha_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); +#endif kfree(imm); } @@ -4051,10 +4649,9 @@ restart: cmd->sg = se_cmd->t_data_sg; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, - "SRR cmd %p (se_cmd %p, tag %d, op %x), " - "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, - se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, - cmd->sg_cnt, cmd->offset); + "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d", + cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ? 
+ se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset); qlt_handle_srr(vha, sctio, imm); @@ -4325,14 +4922,14 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; ctio24->nport_handle = sess->loop_id; - ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; ctio24->exchange_addr = atio->u.isp24.exchange_addr; ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | - __constant_cpu_to_le16( + cpu_to_le16( CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | CTIO7_FLAGS_DONT_RET_CTIO); /* @@ -4660,7 +5257,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) struct atio_from_isp *atio = (struct atio_from_isp *)pkt; int rc; if (atio->u.isp2x.status != - __constant_cpu_to_le16(ATIO_CDB_VALID)) { + cpu_to_le16(ATIO_CDB_VALID)) { ql_dbg(ql_dbg_tgt, vha, 0xe05e, "qla_target(%d): ATIO with error " "status %x received\n", vha->vp_idx, @@ -4734,7 +5331,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) le16_to_cpu(entry->u.isp2x.status)); tgt->notify_ack_expected--; if (entry->u.isp2x.status != - __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) { + cpu_to_le16(NOTIFY_ACK_SUCCESS)) { ql_dbg(ql_dbg_tgt, vha, 0xe061, "qla_target(%d): NOTIFY_ACK " "failed %x\n", vha->vp_idx, @@ -5016,6 +5613,11 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + sess = NULL; + goto out_term; + } + kref_get(&sess->se_sess->sess_kref); } @@ -5048,7 +5650,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, uint8_t *s_id = NULL; /* to hide compiler warnings */ int rc; uint32_t lun, unpacked_lun; - int lun_size, fn; + int fn; void *iocb; spin_lock_irqsave(&ha->hardware_lock, flags); @@ -5070,12 +5672,16 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + sess = NULL; + goto out_term; + } + kref_get(&sess->se_sess->sess_kref); } iocb = a; lun = a->u.isp24.fcp_cmnd.lun; - lun_size = sizeof(lun); fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); @@ -5556,6 +6162,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) /* Adjust ring index */ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); + RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha)); } void @@ -5598,19 +6205,19 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = __constant_cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0xFFFF); /* Enable target mode */ - nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); + nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) - nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5); + nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ - nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); + nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Enable initial LIP */ - nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); + nv->firmware_options_1 &= cpu_to_le32(~BIT_9); if (ql2xtgt_tape_enable) /* Enable FC Tape support */ nv->firmware_options_2 |= 
cpu_to_le32(BIT_12); @@ -5619,9 +6226,9 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) nv->firmware_options_2 &= cpu_to_le32(~BIT_12); /* Disable Full Login after LIP */ - nv->host_p &= __constant_cpu_to_le32(~BIT_10); + nv->host_p &= cpu_to_le32(~BIT_10); /* Enable target PRLI control */ - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); + nv->firmware_options_2 |= cpu_to_le32(BIT_14); } else { if (ha->tgt.saved_set) { nv->exchange_count = ha->tgt.saved_exchange_count; @@ -5643,12 +6250,12 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); + nv->firmware_options_2 |= cpu_to_le32(BIT_8); } else { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS3; - nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); + nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); } } @@ -5660,7 +6267,7 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, if (ha->tgt.node_name_set) { memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); - icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); + icb->firmware_options_1 |= cpu_to_le32(BIT_14); } } @@ -5685,20 +6292,19 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) ha->tgt.saved_set = 1; } - nv->exchange_count = __constant_cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0xFFFF); /* Enable target mode */ - nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); + nv->firmware_options_1 |= cpu_to_le32(BIT_4); /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) - nv->firmware_options_1 |= - __constant_cpu_to_le32(BIT_5); + nv->firmware_options_1 |= cpu_to_le32(BIT_5); /* Disable Full Login after LIP */ - nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); + nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Enable initial LIP */ - nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); + nv->firmware_options_1 &= cpu_to_le32(~BIT_9); if (ql2xtgt_tape_enable) /* Enable FC tape support */ nv->firmware_options_2 |= cpu_to_le32(BIT_12); @@ -5707,9 +6313,9 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) nv->firmware_options_2 &= cpu_to_le32(~BIT_12); /* Disable Full Login after LIP */ - nv->host_p &= __constant_cpu_to_le32(~BIT_10); + nv->host_p &= cpu_to_le32(~BIT_10); /* Enable target PRLI control */ - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); + nv->firmware_options_2 |= cpu_to_le32(BIT_14); } else { if (ha->tgt.saved_set) { nv->exchange_count = ha->tgt.saved_exchange_count; @@ -5731,12 +6337,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) fc_host_supported_classes(vha->host) = FC_COS_CLASS2 | FC_COS_CLASS3; - nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); + nv->firmware_options_2 |= cpu_to_le32(BIT_8); } else { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = FC_COS_CLASS3; - nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); + nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); } } @@ -5751,7 +6357,7 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, if (ha->tgt.node_name_set) { memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); - icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); + icb->firmware_options_1 |= cpu_to_le32(BIT_14); } } @@ -5797,7 +6403,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data 
*ha) if (!QLA_TGT_MODE_ENABLED()) return; - if (ha->mqenable || IS_QLA83XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/kernel/drivers/scsi/qla2xxx/qla_target.h b/kernel/drivers/scsi/qla2xxx/qla_target.h index 332086776..bca584ae4 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_target.h +++ b/kernel/drivers/scsi/qla2xxx/qla_target.h @@ -167,7 +167,24 @@ struct imm_ntfy_from_isp { uint32_t srr_rel_offs; uint16_t srr_ui; uint16_t srr_ox_id; - uint8_t reserved_4[19]; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ + uint16_t wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; + uint16_t nport_handle; + uint16_t resv2; + } req_els; + } u; + uint8_t port_name[8]; + uint8_t resv3[3]; uint8_t vp_index; uint32_t reserved_5; uint8_t port_id[3]; @@ -234,6 +251,7 @@ struct nack_to_isp { uint8_t reserved[2]; uint16_t ox_id; } __packed; +#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 #define NOTIFY_ACK_SRR_FLAGS_REJECT 1 @@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define FC_TM_REJECT 4 #define FC_TM_FAILED 5 -/* - * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was - * terminated, so no more actions is needed and success should be returned - * to target. - */ -#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717 - #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) #define pci_dma_lo32(a) (a & 0xffffffff) #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) @@ -874,6 +885,15 @@ struct qla_tgt_sess_op { struct scsi_qla_host *vha; struct atio_from_isp atio; struct work_struct work; + struct list_head cmd_list; + bool aborted; +}; + +enum qla_sess_deletion { + QLA_SESS_DELETION_NONE = 0, + QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of + * this one */ + QLA_SESS_DELETION_IN_PROGRESS = 2, }; /* @@ -884,8 +904,15 @@ struct qla_tgt_sess { port_id_t s_id; unsigned int conf_compl_supported:1; - unsigned int deleted:1; + unsigned int deleted:2; unsigned int local:1; + unsigned int logout_on_delete:1; + unsigned int plogi_ack_needed:1; + unsigned int keep_nport_handle:1; + + unsigned char logout_completed; + + int generation; struct se_session *se_sess; struct scsi_qla_host *vha; @@ -897,6 +924,10 @@ struct qla_tgt_sess { uint8_t port_name[WWN_SIZE]; struct work_struct free_work; + + union { + struct imm_ntfy_from_isp tm_iocb; + }; }; struct qla_tgt_cmd { @@ -912,7 +943,6 @@ struct qla_tgt_cmd { unsigned int conf_compl_supported:1; unsigned int sg_mapped:1; unsigned int free_sg:1; - unsigned int aborted:1; /* Needed in case of SRR */ unsigned int write_data_transferred:1; unsigned int ctx_dsd_alloced:1; unsigned int q_full:1; @@ -924,7 +954,6 @@ struct qla_tgt_cmd { int sg_cnt; /* SG segments count */ int bufflen; /* cmd buffer length */ int offset; - uint32_t tag; uint32_t unpacked_lun; enum dma_data_direction dma_data_direction; uint32_t reset_count; @@ -962,6 +991,9 @@ struct qla_tgt_cmd { * BIT_14 - Back end data received/sent. 
* BIT_15 - SRR prepare ctio * BIT_16 - complete free + * BIT_17 - flush - qlt_abort_cmd_on_host_reset + * BIT_18 - completion w/abort status + * BIT_19 - completion w/unknown status */ uint32_t cmd_flags; }; @@ -1027,6 +1059,10 @@ struct qla_tgt_srr_ctio { struct qla_tgt_cmd *cmd; }; +/* Check for Switch reserved address */ +#define IS_SW_RESV_ADDR(_s_id) \ + ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc)) + #define QLA_TGT_XMIT_DATA 1 #define QLA_TGT_XMIT_STATUS 2 #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) @@ -1044,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64, extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct qla_tgt_sess *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); -extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); +extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); extern void qlt_exit(void); extern void qlt_update_vp_map(struct scsi_qla_host *, int); @@ -1074,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) ha->host->active_mode |= MODE_INITIATOR; } +static inline uint32_t sid_to_key(const uint8_t *s_id) +{ + uint32_t key; + + key = (((unsigned long)s_id[0] << 16) | + ((unsigned long)s_id[1] << 8) | + (unsigned long)s_id[2]); + return key; +} + /* * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. */ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); +extern void qlt_abort_cmd(struct qla_tgt_cmd *); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); @@ -1110,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); extern void qlt_83xx_iospace_config(struct qla_hw_data *); extern int qlt_free_qfull_cmds(struct scsi_qla_host *); +extern void qlt_logo_completion_handler(fc_port_t *, int); +extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); #endif /* __QLA_TARGET_H */ diff --git a/kernel/drivers/scsi/qla2xxx/qla_tmpl.c b/kernel/drivers/scsi/qla2xxx/qla_tmpl.c index 962cb89fe..c3e622524 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/kernel/drivers/scsi/qla2xxx/qla_tmpl.c @@ -137,39 +137,39 @@ qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) } static inline void -qla27xx_read8(void *window, void *buf, ulong *len) +qla27xx_read8(void __iomem *window, void *buf, ulong *len) { uint8_t value = ~0; if (buf) { - value = RD_REG_BYTE((__iomem void *)window); + value = RD_REG_BYTE(window); } qla27xx_insert32(value, buf, len); } static inline void -qla27xx_read16(void *window, void *buf, ulong *len) +qla27xx_read16(void __iomem *window, void *buf, ulong *len) { uint16_t value = ~0; if (buf) { - value = RD_REG_WORD((__iomem void *)window); + value = RD_REG_WORD(window); } qla27xx_insert32(value, buf, len); } static inline void -qla27xx_read32(void *window, void *buf, ulong *len) +qla27xx_read32(void __iomem *window, void *buf, ulong *len) { uint32_t value = ~0; if (buf) { - value = RD_REG_DWORD((__iomem void *)window); + value = RD_REG_DWORD(window); } qla27xx_insert32(value, buf, len); } -static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *) +static inline void (*qla27xx_read_vector(uint 
width))(void __iomem*, void *, ulong *) { return (width == 1) ? qla27xx_read8 : @@ -181,7 +181,7 @@ static inline void qla27xx_read_reg(__iomem struct device_reg_24xx *reg, uint offset, void *buf, ulong *len) { - void *window = (void *)reg + offset; + void __iomem *window = (void __iomem *)reg + offset; qla27xx_read32(window, buf, len); } @@ -202,8 +202,8 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg, uint32_t addr, uint offset, uint count, uint width, void *buf, ulong *len) { - void *window = (void *)reg + offset; - void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width); + void __iomem *window = (void __iomem *)reg + offset; + void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width); qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); while (count--) { @@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; + + if (!test_bit(i, vha->hw->req_qid_map)) + continue; + if (req || !buf) { length = req ? req->length : REQUEST_ENTRY_CNT_24XX; @@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (!test_bit(i, vha->hw->rsp_qid_map)) + continue; + if (rsp || !buf) { length = rsp ? rsp->length : RESPONSE_ENTRY_CNT_MQ; @@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; + + if (!test_bit(i, vha->hw->req_qid_map)) + continue; + if (req || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); @@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (!test_bit(i, vha->hw->rsp_qid_map)) + continue; + if (rsp || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); @@ -805,9 +821,8 @@ static void qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) { uint8_t v[] = { 0, 0, 0, 0, 0, 0 }; - int rval = 0; - rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", + sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", v+0, v+1, v+2, v+3, v+4, v+5); tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]; @@ -940,8 +955,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) { ulong flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&vha->hw->hardware_lock, flags); +#endif if (!vha->hw->fw_dump) ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n"); @@ -954,6 +971,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) else qla27xx_execute_fwdt_template(vha); +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +#endif } diff --git a/kernel/drivers/scsi/qla2xxx/qla_version.h b/kernel/drivers/scsi/qla2xxx/qla_version.h index 2ed9ab90a..6d31faa8c 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_version.h +++ b/kernel/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.18-k" +#define QLA2XXX_VERSION "8.07.00.26-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 7 diff --git 
a/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 5c9e680aa..81af294f1 100644 --- a/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -42,9 +43,6 @@ #include #include #include -#include -#include -#include #include "qla_def.h" #include "qla_target.h" @@ -53,9 +51,6 @@ static struct workqueue_struct *tcm_qla2xxx_free_wq; static struct workqueue_struct *tcm_qla2xxx_cmd_wq; -static const struct target_core_fabric_ops tcm_qla2xxx_ops; -static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops; - /* * Parse WWN. * If strict, we require lower-case hex and colon separators to be sure @@ -190,23 +185,6 @@ static char *tcm_qla2xxx_npiv_get_fabric_name(void) return "qla2xxx_npiv"; } -static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - u8 proto_id; - - switch (lport->lport_proto_id) { - case SCSI_PROTOCOL_FCP: - default: - proto_id = fc_get_fabric_proto_ident(se_tpg); - break; - } - - return proto_id; -} - static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, @@ -223,78 +201,6 @@ static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) return tpg->lport_tpgt; } -static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg) -{ - return 1; -} - -static u32 tcm_qla2xxx_get_pr_transport_id( - struct se_portal_group *se_tpg, - struct se_node_acl *se_nacl, - struct t10_pr_registration *pr_reg, - int *format_code, - unsigned char *buf) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - int ret = 0; - - switch (lport->lport_proto_id) { - case SCSI_PROTOCOL_FCP: - default: - ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, - format_code, buf); - break; - } - - return ret; -} - -static u32 tcm_qla2xxx_get_pr_transport_id_len( - struct se_portal_group *se_tpg, - struct se_node_acl *se_nacl, - struct t10_pr_registration *pr_reg, - int *format_code) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - int ret = 0; - - switch (lport->lport_proto_id) { - case SCSI_PROTOCOL_FCP: - default: - ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, - format_code); - break; - } - - return ret; -} - -static char *tcm_qla2xxx_parse_pr_out_transport_id( - struct se_portal_group *se_tpg, - const char *buf, - u32 *out_tid_len, - char **port_nexus_ptr) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - char *tid = NULL; - - switch (lport->lport_proto_id) { - case SCSI_PROTOCOL_FCP: - default: - tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, - port_nexus_ptr); - break; - } - - return tid; -} - static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, @@ -343,29 +249,6 @@ static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) return tpg->tpg_attrib.fabric_prot_type; } -static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( - struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_nacl *nacl; - - nacl = kzalloc(sizeof(struct 
tcm_qla2xxx_nacl), GFP_KERNEL); - if (!nacl) { - pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n"); - return NULL; - } - - return &nacl->se_node_acl; -} - -static void tcm_qla2xxx_release_fabric_acl( - struct se_portal_group *se_tpg, - struct se_node_acl *se_nacl) -{ - struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, - struct tcm_qla2xxx_nacl, se_node_acl); - kfree(nacl); -} - static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, @@ -429,7 +312,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) cmd->cmd_flags |= BIT_14; } - return target_put_sess_cmd(se_cmd->se_sess, se_cmd); + return target_put_sess_cmd(se_cmd); } /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying @@ -489,7 +372,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - + cmd->cmd_flags |= BIT_3; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -520,7 +403,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, - 3000); + 3 * HZ); return 0; } spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); @@ -533,21 +416,14 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) return; } -static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd) -{ - struct qla_tgt_cmd *cmd; - - /* check for task mgmt cmd */ - if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) - return 0xffffffff; - - cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - - return cmd->tag; -} - static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) { + if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + return cmd->state; + } + return 0; } @@ -669,12 +545,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->cmd_flags |= BIT_4; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; cmd->offset = 0; - cmd->cmd_flags |= BIT_3; cmd->prot_sg_cnt = se_cmd->t_prot_nents; cmd->prot_sg = se_cmd->t_prot_sg; @@ -699,7 +573,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); if (cmd->cmd_flags & BIT_5) { pr_crit("Bit_5 already set for cmd = %p.\n", cmd); dump_stack(); @@ -764,14 +637,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; - struct qla_hw_data *ha = vha->hw; - - if (!cmd->sg_mapped) - return; - - pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - cmd->sg_mapped = 0; + qlt_abort_cmd(cmd); } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, @@ -826,17 +692,6 @@ static void tcm_qla2xxx_release_session(struct kref *kref) qlt_unreg_sess(se_sess->fabric_sess_ptr); } -static void tcm_qla2xxx_put_session(struct se_session *se_sess) -{ - struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; - struct qla_hw_data *ha = 
sess->vha->hw; - unsigned long flags; - - spin_lock_irqsave(&ha->hardware_lock, flags); - kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); - spin_unlock_irqrestore(&ha->hardware_lock, flags); -} - static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) { if (!sess) @@ -852,76 +707,43 @@ static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) target_sess_cmd_list_set_waiting(sess->se_sess); } -static struct se_node_acl *tcm_qla2xxx_make_nodeacl( - struct se_portal_group *se_tpg, - struct config_group *group, - const char *name) +static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl, + const char *name) { - struct se_node_acl *se_nacl, *se_nacl_new; - struct tcm_qla2xxx_nacl *nacl; + struct tcm_qla2xxx_nacl *nacl = + container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); u64 wwnn; - u32 qla2xxx_nexus_depth; if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) - return ERR_PTR(-EINVAL); - - se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg); - if (!se_nacl_new) - return ERR_PTR(-ENOMEM); -/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */ - qla2xxx_nexus_depth = 1; + return -EINVAL; - /* - * se_nacl_new may be released by core_tpg_add_initiator_node_acl() - * when converting a NodeACL from demo mode -> explict - */ - se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, - name, qla2xxx_nexus_depth); - if (IS_ERR(se_nacl)) { - tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); - return se_nacl; - } - /* - * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN - */ - nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); nacl->nport_wwnn = wwnn; tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); - return se_nacl; -} - -static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl) -{ - struct se_portal_group *se_tpg = se_acl->se_tpg; - struct tcm_qla2xxx_nacl *nacl = container_of(se_acl, - struct tcm_qla2xxx_nacl, se_node_acl); - - core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1); - kfree(nacl); + return 0; } /* Start items for tcm_qla2xxx_tpg_attrib_cit */ #define DEF_QLA_TPG_ATTRIB(name) \ \ -static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ - struct se_portal_group *se_tpg, \ - char *page) \ +static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show( \ + struct config_item *item, char *page) \ { \ + struct se_portal_group *se_tpg = attrib_to_tpg(item); \ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ \ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \ } \ \ -static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ - struct se_portal_group *se_tpg, \ - const char *page, \ - size_t count) \ +static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store( \ + struct config_item *item, const char *page, size_t count) \ { \ + struct se_portal_group *se_tpg = attrib_to_tpg(item); \ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ + struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ unsigned long val; \ int ret; \ \ @@ -931,81 +753,39 @@ static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ " ret: %d\n", ret); \ return -EINVAL; \ } \ - ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \ - \ - return (!ret) ? 
count : -EINVAL; \ -} - -#define DEF_QLA_TPG_ATTR_BOOL(_name) \ - \ -static int tcm_qla2xxx_set_attrib_##_name( \ - struct tcm_qla2xxx_tpg *tpg, \ - unsigned long val) \ -{ \ - struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ \ if ((val != 0) && (val != 1)) { \ pr_err("Illegal boolean value %lu\n", val); \ return -EINVAL; \ } \ \ - a->_name = val; \ - return 0; \ -} - -#define QLA_TPG_ATTR(_name, _mode) \ - TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode); + a->name = val; \ + \ + return count; \ +} \ +CONFIGFS_ATTR(tcm_qla2xxx_tpg_attrib_, name) -/* - * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls - */ -DEF_QLA_TPG_ATTR_BOOL(generate_node_acls); DEF_QLA_TPG_ATTRIB(generate_node_acls); -QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR); - -/* - Define tcm_qla2xxx_attrib_s_cache_dynamic_acls - */ -DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls); DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); -QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR); - -/* - * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect - */ -DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect); DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); -QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); - -/* - * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect - */ -DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect); DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); -QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); - -/* - * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only - */ -DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only); DEF_QLA_TPG_ATTRIB(demo_mode_login_only); -QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR); static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { - &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, - &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, - &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, - &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, - &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr, + &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, + &tcm_qla2xxx_tpg_attrib_attr_cache_dynamic_acls, + &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, + &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, + &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, NULL, }; /* End items for tcm_qla2xxx_tpg_attrib_cit */ -static ssize_t tcm_qla2xxx_tpg_show_enable( - struct se_portal_group *se_tpg, - char *page) +static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item, + char *page) { + struct se_portal_group *se_tpg = to_tpg(item); struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); @@ -1041,11 +821,10 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) complete(&base_tpg->tpg_base_comp); } -static ssize_t tcm_qla2xxx_tpg_store_enable( - struct se_portal_group *se_tpg, - const char *page, - size_t count) +static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, + const char *page, size_t count) { + struct se_portal_group *se_tpg = to_tpg(item); struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); unsigned long op; @@ -1085,22 +864,16 @@ static ssize_t tcm_qla2xxx_tpg_store_enable( return count; } -TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR); - -static ssize_t tcm_qla2xxx_tpg_show_dynamic_sessions( - struct se_portal_group *se_tpg, - char *page) +static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item, + char *page) { - return target_show_dynamic_sessions(se_tpg, page); + return target_show_dynamic_sessions(to_tpg(item), page); } 
-TF_TPG_BASE_ATTR_RO(tcm_qla2xxx, dynamic_sessions); - -static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type( - struct se_portal_group *se_tpg, - const char *page, - size_t count) +static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_store(struct config_item *item, + const char *page, size_t count) { + struct se_portal_group *se_tpg = to_tpg(item); struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); unsigned long val; @@ -1119,21 +892,24 @@ static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type( return count; } -static ssize_t tcm_qla2xxx_tpg_show_fabric_prot_type( - struct se_portal_group *se_tpg, - char *page) +static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, + char *page) { + struct se_portal_group *se_tpg = to_tpg(item); struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); } -TF_TPG_BASE_ATTR(tcm_qla2xxx, fabric_prot_type, S_IRUGO | S_IWUSR); + +CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable); +CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); +CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { - &tcm_qla2xxx_tpg_enable.attr, - &tcm_qla2xxx_tpg_dynamic_sessions.attr, - &tcm_qla2xxx_tpg_fabric_prot_type.attr, + &tcm_qla2xxx_tpg_attr_enable, + &tcm_qla2xxx_tpg_attr_dynamic_sessions, + &tcm_qla2xxx_tpg_attr_fabric_prot_type, NULL, }; @@ -1174,8 +950,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( tpg->tpg_attrib.cache_dynamic_acls = 1; tpg->tpg_attrib.demo_mode_login_only = 1; - ret = core_tpg_register(&tcm_qla2xxx_ops, wwn, - &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); if (ret < 0) { kfree(tpg); return NULL; @@ -1207,18 +982,16 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) kfree(tpg); } -static ssize_t tcm_qla2xxx_npiv_tpg_show_enable( - struct se_portal_group *se_tpg, - char *page) +static ssize_t tcm_qla2xxx_npiv_tpg_enable_show(struct config_item *item, + char *page) { - return tcm_qla2xxx_tpg_show_enable(se_tpg, page); + return tcm_qla2xxx_tpg_enable_show(item, page); } -static ssize_t tcm_qla2xxx_npiv_tpg_store_enable( - struct se_portal_group *se_tpg, - const char *page, - size_t count) +static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, + const char *page, size_t count) { + struct se_portal_group *se_tpg = to_tpg(item); struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; struct tcm_qla2xxx_lport *lport = container_of(se_wwn, struct tcm_qla2xxx_lport, lport_wwn); @@ -1254,10 +1027,10 @@ static ssize_t tcm_qla2xxx_npiv_tpg_store_enable( return count; } -TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR); +CONFIGFS_ATTR(tcm_qla2xxx_npiv_tpg_, enable); static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = { - &tcm_qla2xxx_npiv_tpg_enable.attr, + &tcm_qla2xxx_npiv_tpg_attr_enable, NULL, }; @@ -1294,8 +1067,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( tpg->tpg_attrib.cache_dynamic_acls = 1; tpg->tpg_attrib.demo_mode_login_only = 1; - ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn, - &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); if (ret < 0) { kfree(tpg); return NULL; @@ -1323,9 +1095,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( return NULL; } - key = (((unsigned long)s_id[0] << 16) | - ((unsigned long)s_id[1] << 8) | - (unsigned 
long)s_id[2]); + key = sid_to_key(s_id); pr_debug("find_sess_by_s_id: 0x%06x\n", key); se_nacl = btree_lookup32(&lport->lport_fcport_map, key); @@ -1360,9 +1130,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( void *slot; int rc; - key = (((unsigned long)s_id[0] << 16) | - ((unsigned long)s_id[1] << 8) | - (unsigned long)s_id[2]); + key = sid_to_key(s_id); pr_debug("set_sess_by_s_id: %06x\n", key); slot = btree_lookup32(&lport->lport_fcport_map, key); @@ -1541,9 +1309,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) struct qla_hw_data *ha = tgt->ha; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); struct se_session *se_sess; - struct se_node_acl *se_nacl; struct tcm_qla2xxx_lport *lport; - struct tcm_qla2xxx_nacl *nacl; BUG_ON(in_interrupt()); @@ -1553,8 +1319,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) dump_stack(); return; } - se_nacl = se_sess->se_node_acl; - nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); lport = vha->vha_tgt.target_lport_ptr; if (!lport) { @@ -1718,6 +1482,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, } sess->conf_compl_supported = conf_compl_supported; + + /* Reset logout parameters to default */ + sess->logout_on_delete = 1; + sess->keep_nport_handle = 0; } /* @@ -1858,7 +1626,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, (struct tcm_qla2xxx_lport *)target_lport_ptr; struct tcm_qla2xxx_lport *base_lport = (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; - struct tcm_qla2xxx_tpg *base_tpg; struct fc_vport_identifiers vport_id; if (!qla_tgt_mode_enabled(base_vha)) { @@ -1871,7 +1638,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, pr_err("qla2xxx base_lport or tpg_1 not available\n"); return -EPERM; } - base_tpg = base_lport->tpg_1; memset(&vport_id, 0, sizeof(vport_id)); vport_id.port_name = npiv_wwpn; @@ -1967,9 +1733,8 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) } -static ssize_t tcm_qla2xxx_wwn_show_attr_version( - struct target_fabric_configfs *tf, - char *page) +static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, + char *page) { return sprintf(page, "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " @@ -1977,24 +1742,25 @@ static ssize_t tcm_qla2xxx_wwn_show_attr_version( utsname()->machine); } -TF_WWN_ATTR_RO(tcm_qla2xxx, version); +CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version); static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { - &tcm_qla2xxx_wwn_version.attr, + &tcm_qla2xxx_wwn_attr_version, NULL, }; static const struct target_core_fabric_ops tcm_qla2xxx_ops = { .module = THIS_MODULE, .name = "qla2xxx", + .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), + /* + * XXX: Limit assumes single page per scatter-gather-list entry. 
+ * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096 + */ + .max_data_sg_nents = 1200, .get_fabric_name = tcm_qla2xxx_get_fabric_name, - .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, .tpg_get_tag = tcm_qla2xxx_get_tag, - .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, - .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, - .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, - .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, .tpg_check_demo_mode_write_protect = @@ -2003,12 +1769,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { tcm_qla2xxx_check_prod_write_protect, .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, - .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, - .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, - .put_session = tcm_qla2xxx_put_session, .shutdown_session = tcm_qla2xxx_shutdown_session, .close_session = tcm_qla2xxx_close_session, .sess_get_index = tcm_qla2xxx_sess_get_index, @@ -2016,7 +1779,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { .write_pending = tcm_qla2xxx_write_pending, .write_pending_status = tcm_qla2xxx_write_pending_status, .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, - .get_task_tag = tcm_qla2xxx_get_task_tag, .get_cmd_state = tcm_qla2xxx_get_cmd_state, .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, @@ -2030,12 +1792,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { .fabric_drop_wwn = tcm_qla2xxx_drop_lport, .fabric_make_tpg = tcm_qla2xxx_make_tpg, .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, - .fabric_post_link = NULL, - .fabric_pre_unlink = NULL, - .fabric_make_np = NULL, - .fabric_drop_np = NULL, - .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, - .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, + .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, @@ -2045,26 +1802,19 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .module = THIS_MODULE, .name = "qla2xxx_npiv", + .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, - .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, .tpg_get_tag = tcm_qla2xxx_get_tag, - .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, - .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, - .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, - .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_prod_write_protect, .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, - .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, - .tpg_release_fabric_acl = 
tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, - .put_session = tcm_qla2xxx_put_session, .shutdown_session = tcm_qla2xxx_shutdown_session, .close_session = tcm_qla2xxx_close_session, .sess_get_index = tcm_qla2xxx_sess_get_index, @@ -2072,7 +1822,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .write_pending = tcm_qla2xxx_write_pending, .write_pending_status = tcm_qla2xxx_write_pending_status, .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, - .get_task_tag = tcm_qla2xxx_get_task_tag, .get_cmd_state = tcm_qla2xxx_get_cmd_state, .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, @@ -2086,12 +1835,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, - .fabric_post_link = NULL, - .fabric_pre_unlink = NULL, - .fabric_make_np = NULL, - .fabric_drop_np = NULL, - .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl, - .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl, + .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs, @@ -2162,7 +1906,7 @@ static void __exit tcm_qla2xxx_exit(void) tcm_qla2xxx_deregister_configfs(); } -MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver"); +MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver"); MODULE_LICENSE("GPL"); module_init(tcm_qla2xxx_init); module_exit(tcm_qla2xxx_exit); diff --git a/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 23295115c..3bbf4cb6f 100644 --- a/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/kernel/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -13,6 +13,8 @@ #include "qla_target.h" struct tcm_qla2xxx_nacl { + struct se_node_acl se_node_acl; + /* From libfc struct fc_rport->port_id */ u32 nport_id; /* Binary World Wide unique Node Name for remote FC Initiator Nport */ @@ -23,8 +25,6 @@ struct tcm_qla2xxx_nacl { struct qla_tgt_sess *qla_tgt_sess; /* Pointer to TCM FC nexus */ struct se_session *nport_nexus; - /* Returned by tcm_qla2xxx_make_nodeacl() */ - struct se_node_acl se_node_acl; }; struct tcm_qla2xxx_tpg_attrib { @@ -57,8 +57,6 @@ struct tcm_qla2xxx_fc_loopid { }; struct tcm_qla2xxx_lport { - /* SCSI protocol the lport is providing */ - u8 lport_proto_id; /* Binary World Wide unique Port Name for FC Target Lport */ u64 lport_wwpn; /* Binary World Wide unique Port Name for FC NPIV Target Lport */